import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import zipfile
import cv2
from skimage import io
import tensorflow as tf
from tensorflow.python.keras import Sequential
from tensorflow.keras import layers, optimizers
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import glorot_uniform
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler
import tensorflow.keras.backend as K
import random
import glob
from sklearn.preprocessing import StandardScaler, normalize
from IPython.display import display
# Load the per-patient clinical/genomic metadata that ships with the
# LGG MRI segmentation dataset, then take a first look at it.
CSV_PATH = 'brain_dataset/lgg-mri-segmentation/kaggle_3m/data.csv'
data = pd.read_csv(CSV_PATH)
data.info()
data.head(10)
| Patient | RNASeqCluster | MethylationCluster | miRNACluster | CNCluster | RPPACluster | OncosignCluster | COCCluster | histological_type | neoplasm_histologic_grade | tumor_tissue_site | laterality | tumor_location | gender | age_at_initial_pathologic | race | ethnicity | death01 | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | TCGA_CS_4941 | 2.0 | 4.0 | 2 | 2.0 | NaN | 3.0 | 2 | 1.0 | 2.0 | 1.0 | 3.0 | 2.0 | 2.0 | 67.0 | 3.0 | 2.0 | 1.0 |
| 1 | TCGA_CS_4942 | 1.0 | 5.0 | 2 | 1.0 | 1.0 | 2.0 | 1 | 1.0 | 2.0 | 1.0 | 3.0 | 2.0 | 1.0 | 44.0 | 2.0 | NaN | 1.0 |
| 2 | TCGA_CS_4943 | 1.0 | 5.0 | 2 | 1.0 | 2.0 | 2.0 | 1 | 1.0 | 2.0 | 1.0 | 1.0 | 2.0 | 2.0 | 37.0 | 3.0 | NaN | 0.0 |
| 3 | TCGA_CS_4944 | NaN | 5.0 | 2 | 1.0 | 2.0 | 1.0 | 1 | 1.0 | 1.0 | 1.0 | 3.0 | 6.0 | 2.0 | 50.0 | 3.0 | NaN | 0.0 |
| 4 | TCGA_CS_5393 | 4.0 | 5.0 | 2 | 1.0 | 2.0 | 3.0 | 1 | 1.0 | 2.0 | 1.0 | 1.0 | 6.0 | 2.0 | 39.0 | 3.0 | NaN | 0.0 |
| 5 | TCGA_CS_5395 | 2.0 | 4.0 | 2 | 2.0 | NaN | 3.0 | 2 | 3.0 | 1.0 | 1.0 | 3.0 | 5.0 | 2.0 | 43.0 | 2.0 | NaN | 1.0 |
| 6 | TCGA_CS_5396 | 3.0 | 3.0 | 2 | 3.0 | 2.0 | 2.0 | 3 | 3.0 | 2.0 | 1.0 | 3.0 | 2.0 | 1.0 | 53.0 | 3.0 | 2.0 | 0.0 |
| 7 | TCGA_CS_5397 | NaN | 4.0 | 1 | 2.0 | 3.0 | 3.0 | 2 | 1.0 | 2.0 | 1.0 | 1.0 | 6.0 | 1.0 | 54.0 | 3.0 | 2.0 | 1.0 |
| 8 | TCGA_CS_6186 | 2.0 | 4.0 | 1 | 2.0 | 1.0 | 3.0 | 2 | 2.0 | 2.0 | 1.0 | 3.0 | 2.0 | 2.0 | 58.0 | 3.0 | 2.0 | 1.0 |
| 9 | TCGA_CS_6188 | 2.0 | 4.0 | 3 | 2.0 | 3.0 | 3.0 | 2 | 1.0 | 2.0 | 1.0 | 3.0 | 6.0 | 2.0 | 48.0 | 3.0 | 2.0 | 0.0 |
# Walk every patient directory under kaggle_3m/ and record, for each file in
# it, the directory name (patient id) and the file's full path.  data_map is a
# flat list alternating [dir_name, image_path, dir_name, image_path, ...] and
# is unpacked with [::2] / [1::2] below.
data_map = []
for sub_dir_path in glob.glob("brain_dataset/lgg-mri-segmentation/kaggle_3m/" + "*"):
    # Skip the loose files at the top level (data.csv, README.md) explicitly,
    # instead of letting os.listdir() raise NotADirectoryError for them and
    # printing the error (the original try/except approach).
    if not os.path.isdir(sub_dir_path):
        continue
    dir_name = sub_dir_path.split('/')[-1]
    for filename in os.listdir(sub_dir_path):
        image_path = sub_dir_path + '/' + filename
        data_map.extend([dir_name, image_path])
[Errno 20] Not a directory: 'brain_dataset/lgg-mri-segmentation/kaggle_3m/data.csv' [Errno 20] Not a directory: 'brain_dataset/lgg-mri-segmentation/kaggle_3m/README.md'
# Pair up the alternating [patient_id, path, patient_id, path, ...] entries
# of data_map into a two-column dataframe.
patient_ids = data_map[::2]
file_paths = data_map[1::2]
df = pd.DataFrame({"patient_id": patient_ids,
                   "path": file_paths})
df.head()
| patient_id | path | |
|---|---|---|
| 0 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... |
| 1 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... |
| 2 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... |
| 3 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... |
| 4 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... |
# Separate MRI image paths from segmentation mask paths.
df_imgs = df[~df['path'].str.contains("mask")]
df_masks = df[df['path'].str.contains("mask")]

# Sort both lists by the slice number embedded at the end of each filename
# (e.g. ..._18.tif and ..._18_mask.tif).  The number is parsed out of the
# filename itself rather than sliced at a hard-coded character offset
# (BASE_LEN = 89 in the original), which silently breaks as soon as the base
# directory path changes length.  Python's sort is stable, so images and
# masks sharing a slice number keep their original (per-patient) order and
# stay aligned index-for-index, exactly as before.
def _img_slice_number(path):
    # "..._18.tif" -> 18
    return int(path.rsplit('_', 1)[1][:-len('.tif')])

def _mask_slice_number(path):
    # "..._18_mask.tif" -> 18
    return int(path.rsplit('_', 2)[1])

imgs = sorted(df_imgs["path"].values, key=_img_slice_number)
masks = sorted(df_masks["path"].values, key=_mask_slice_number)

# Sorting check: the image and mask at the same index should belong together.
idx = random.randint(0, len(imgs) - 1)
print("Path to the Image:", imgs[idx], "\nPath to the Mask:", masks[idx])
Path to the Image: brain_dataset/lgg-mri-segmentation/kaggle_3m/TCGA_FG_5964_20010511/TCGA_FG_5964_20010511_18.tif Path to the Mask: brain_dataset/lgg-mri-segmentation/kaggle_3m/TCGA_FG_5964_20010511/TCGA_FG_5964_20010511_18_mask.tif
# Final dataframe: one row per slice, pairing every MRI image with the
# segmentation mask for the same patient and slice.
brain_df = pd.DataFrame(
    {
        "patient_id": df_imgs.patient_id.values,
        "image_path": imgs,
        "mask_path": masks,
    }
)
def pos_neg_diagnosis(mask_path):
    """Return 1 if the mask image at *mask_path* contains any non-zero
    (tumor) pixels, else 0."""
    mask = cv2.imread(mask_path)
    if mask is None:
        # cv2.imread silently returns None on a missing or unreadable file;
        # fail loudly here instead of crashing inside np.max with a cryptic
        # error about NoneType.
        raise FileNotFoundError(f"Could not read mask image: {mask_path}")
    return int(np.max(mask) > 0)

# Label every slice: 1 = tumor present in the mask, 0 = no tumor.
# (The redundant `lambda x: pos_neg_diagnosis(x)` wrapper is dropped.)
brain_df['mask'] = brain_df['mask_path'].apply(pos_neg_diagnosis)
brain_df
| patient_id | image_path | mask_path | mask | |
|---|---|---|---|---|
| 0 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 1 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 2 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 3 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 4 | TCGA_CS_6667_20011105 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| ... | ... | ... | ... | ... |
| 3924 | TCGA_FG_A60K_20040224 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 3925 | TCGA_FG_A60K_20040224 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 3926 | TCGA_FG_A60K_20040224 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 3927 | TCGA_FG_A60K_20040224 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
| 3928 | TCGA_FG_A60K_20040224 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 |
3929 rows × 4 columns
# Class balance of the target label (output above: 2556 negative, 1373 positive).
brain_df['mask'].value_counts()
0 2556 1 1373 Name: mask, dtype: int64
import plotly.graph_objects as go  # using plotly to create interactive plots

# Compute the label counts once; the original recomputed value_counts() for
# both the x and the y axis of the bar chart.
mask_counts = brain_df['mask'].value_counts()
fig = go.Figure([go.Bar(x=mask_counts.index,
                        y=mask_counts,
                        width=[.4, .4]
                        )
                 ])
fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',
                  marker_line_width=4, opacity=0.4
                  )
fig.update_layout(title_text="Mask Count Plot",
                  width=700,
                  height=550,
                  yaxis=dict(
                      title_text="Count",
                      tickmode="array",
                      titlefont=dict(size=20)
                  )
                  )
fig.update_yaxes(automargin=True)
fig.show()
# Locate the first slice whose mask contains a tumor.  The 'mask' column was
# computed above as exactly (max pixel > 0), so it can be queried directly
# instead of re-reading every mask image from disk until one matches.
i = int(brain_df.index[brain_df['mask'] == 1][0])

# Read each file once instead of once per use (the original called
# cv2.imread on the same mask three times).
mask_img = cv2.imread(brain_df.mask_path[i])
mri_img = cv2.imread(brain_df.image_path[i])

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.imshow(mask_img)
plt.title('Tumor Location')
plt.subplot(1, 2, 2)
plt.imshow(mri_img)
mask_img.max(), mask_img.min()
(255, 0)
# Basic visualizations: Visualize the images (MRI and Mask) in the dataset separately
fig, axs = plt.subplots(6, 2, figsize=(16, 26))
for row in range(6):
    # BUG FIX: random.randint is inclusive on BOTH ends, so the upper bound
    # must be len(brain_df) - 1; the original's randint(0, len(brain_df))
    # could index one past the end and raise a KeyError.
    i = random.randint(0, len(brain_df) - 1)
    axs[row][0].title.set_text("Brain MRI")                       # set title
    axs[row][0].imshow(cv2.imread(brain_df.image_path[i]))        # show MRI
    axs[row][1].title.set_text("Mask - " + str(brain_df['mask'][i]))  # label (0 or 1)
    axs[row][1].imshow(cv2.imread(brain_df.mask_path[i]))         # corresponding mask
fig.tight_layout()
# Show the first 12 tumor-positive slices: the raw MRI, its mask, and the
# MRI with the mask region recolored on top of it.
fig, axs = plt.subplots(12, 3, figsize=(20, 50))
shown = 0
for row_idx, label in enumerate(brain_df['mask']):
    if label != 1:
        continue
    mri = io.imread(brain_df.image_path[row_idx])
    axs[shown][0].title.set_text("Brain MRI")
    axs[shown][0].imshow(mri)
    seg = io.imread(brain_df.mask_path[row_idx])
    axs[shown][1].title.set_text("Mask")
    axs[shown][1].imshow(seg, cmap='gray')
    mri[seg == 255] = (0, 255, 150)  # change pixel color at the position of mask
    axs[shown][2].title.set_text("MRI with Mask")
    axs[shown][2].imshow(mri)
    shown += 1
    if shown == 12:
        break
fig.tight_layout()
# Drop the patient id (not a training feature) for the modeling dataframe.
brain_df_train = brain_df.drop(columns=['patient_id'])
# Convert the data in mask column to string format, to use categorical mode
# in flow_from_dataframe.  astype(str) is the vectorized idiom for the
# original's element-wise apply(lambda x: str(x)).
brain_df_train['mask'] = brain_df_train['mask'].astype(str)
brain_df_train.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 3929 entries, 0 to 3928 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 image_path 3929 non-null object 1 mask_path 3929 non-null object 2 mask 3929 non-null object dtypes: object(3) memory usage: 92.2+ KB
from sklearn.model_selection import train_test_split

# Hold out 15% of the slices for final testing.  A fixed random_state makes
# the split — and therefore every reported test metric — reproducible across
# runs; the original re-split the data differently on each execution.
train, test = train_test_split(brain_df_train, test_size=0.15, random_state=42)
# Use the ImageDataGenerator bundled with tensorflow.keras, consistent with
# every other keras import in this file (the standalone keras_preprocessing
# package imported by the original is deprecated).
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Rescale pixels to [0, 1] and carve 10% of the training split off for validation.
datagen = ImageDataGenerator(rescale=1./255., validation_split=0.1)
train_generator = datagen.flow_from_dataframe(train,
                                              directory='./',
                                              x_col='image_path',
                                              y_col='mask',
                                              subset='training',
                                              class_mode='categorical',
                                              batch_size=16,
                                              shuffle=True,
                                              target_size=(256,256)
                                              )
valid_generator = datagen.flow_from_dataframe(train,
                                              directory='./',
                                              x_col='image_path',
                                              y_col='mask',
                                              subset='validation',
                                              class_mode='categorical',
                                              batch_size=16,
                                              shuffle=True,
                                              target_size=(256,256)
                                              )
# Held-out test data: rescale only, no validation split.  shuffle=False keeps
# predictions aligned with the test dataframe's row order.
test_datagen = ImageDataGenerator(rescale=1./255.)
test_generator = test_datagen.flow_from_dataframe(test,
                                                  directory='./',
                                                  x_col='image_path',
                                                  y_col='mask',
                                                  class_mode='categorical',
                                                  batch_size=16,
                                                  shuffle=False,
                                                  target_size=(256,256)
                                                  )
Found 3006 validated image filenames belonging to 2 classes. Found 333 validated image filenames belonging to 2 classes. Found 590 validated image filenames belonging to 2 classes.
from tensorflow.keras.applications.resnet50 import ResNet50

# ImageNet-pretrained ResNet50 backbone without its classification head,
# taking 256x256 RGB slices as input.
input_tensor = Input(shape=(256, 256, 3))
clf_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
clf_model.summary()
2022-06-10 17:33:42.719893: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:305] Could not identify NUMA node of platform GPU ID 0, defaulting to 0. Your kernel may not have been built with NUMA support. 2022-06-10 17:33:42.720128: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:271] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 0 MB memory) -> physical PluggableDevice (device: 0, name: METAL, pci bus id: <undefined>)
Metal device set to: Apple M1
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5
94773248/94765736 [==============================] - 188s 2us/step
94781440/94765736 [==============================] - 188s 2us/step
Model: "resnet50"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3 0 []
)]
conv1_pad (ZeroPadding2D) (None, 262, 262, 3) 0 ['input_1[0][0]']
conv1_conv (Conv2D) (None, 128, 128, 64 9472 ['conv1_pad[0][0]']
)
conv1_bn (BatchNormalization) (None, 128, 128, 64 256 ['conv1_conv[0][0]']
)
conv1_relu (Activation) (None, 128, 128, 64 0 ['conv1_bn[0][0]']
)
pool1_pad (ZeroPadding2D) (None, 130, 130, 64 0 ['conv1_relu[0][0]']
)
pool1_pool (MaxPooling2D) (None, 64, 64, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2D) (None, 64, 64, 64) 4160 ['pool1_pool[0][0]']
conv2_block1_1_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block1_1_conv[0][0]']
ization)
conv2_block1_1_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block1_1_bn[0][0]']
n)
conv2_block1_2_conv (Conv2D) (None, 64, 64, 64) 36928 ['conv2_block1_1_relu[0][0]']
conv2_block1_2_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block1_2_conv[0][0]']
ization)
conv2_block1_2_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block1_2_bn[0][0]']
n)
conv2_block1_0_conv (Conv2D) (None, 64, 64, 256) 16640 ['pool1_pool[0][0]']
conv2_block1_3_conv (Conv2D) (None, 64, 64, 256) 16640 ['conv2_block1_2_relu[0][0]']
conv2_block1_0_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block1_0_conv[0][0]']
ization)
conv2_block1_3_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block1_3_conv[0][0]']
ization)
conv2_block1_add (Add) (None, 64, 64, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activation) (None, 64, 64, 256) 0 ['conv2_block1_add[0][0]']
conv2_block2_1_conv (Conv2D) (None, 64, 64, 64) 16448 ['conv2_block1_out[0][0]']
conv2_block2_1_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block2_1_conv[0][0]']
ization)
conv2_block2_1_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block2_1_bn[0][0]']
n)
conv2_block2_2_conv (Conv2D) (None, 64, 64, 64) 36928 ['conv2_block2_1_relu[0][0]']
conv2_block2_2_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block2_2_conv[0][0]']
ization)
conv2_block2_2_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block2_2_bn[0][0]']
n)
conv2_block2_3_conv (Conv2D) (None, 64, 64, 256) 16640 ['conv2_block2_2_relu[0][0]']
conv2_block2_3_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block2_3_conv[0][0]']
ization)
conv2_block2_add (Add) (None, 64, 64, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activation) (None, 64, 64, 256) 0 ['conv2_block2_add[0][0]']
conv2_block3_1_conv (Conv2D) (None, 64, 64, 64) 16448 ['conv2_block2_out[0][0]']
conv2_block3_1_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block3_1_conv[0][0]']
ization)
conv2_block3_1_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block3_1_bn[0][0]']
n)
conv2_block3_2_conv (Conv2D) (None, 64, 64, 64) 36928 ['conv2_block3_1_relu[0][0]']
conv2_block3_2_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block3_2_conv[0][0]']
ization)
conv2_block3_2_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block3_2_bn[0][0]']
n)
conv2_block3_3_conv (Conv2D) (None, 64, 64, 256) 16640 ['conv2_block3_2_relu[0][0]']
conv2_block3_3_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block3_3_conv[0][0]']
ization)
conv2_block3_add (Add) (None, 64, 64, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activation) (None, 64, 64, 256) 0 ['conv2_block3_add[0][0]']
conv3_block1_1_conv (Conv2D) (None, 32, 32, 128) 32896 ['conv2_block3_out[0][0]']
conv3_block1_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block1_1_conv[0][0]']
ization)
conv3_block1_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block1_1_bn[0][0]']
n)
conv3_block1_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block1_1_relu[0][0]']
conv3_block1_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block1_2_conv[0][0]']
ization)
conv3_block1_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block1_2_bn[0][0]']
n)
conv3_block1_0_conv (Conv2D) (None, 32, 32, 512) 131584 ['conv2_block3_out[0][0]']
conv3_block1_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block1_2_relu[0][0]']
conv3_block1_0_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block1_0_conv[0][0]']
ization)
conv3_block1_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block1_3_conv[0][0]']
ization)
conv3_block1_add (Add) (None, 32, 32, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activation) (None, 32, 32, 512) 0 ['conv3_block1_add[0][0]']
conv3_block2_1_conv (Conv2D) (None, 32, 32, 128) 65664 ['conv3_block1_out[0][0]']
conv3_block2_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block2_1_conv[0][0]']
ization)
conv3_block2_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block2_1_bn[0][0]']
n)
conv3_block2_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block2_1_relu[0][0]']
conv3_block2_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block2_2_conv[0][0]']
ization)
conv3_block2_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block2_2_bn[0][0]']
n)
conv3_block2_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block2_2_relu[0][0]']
conv3_block2_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block2_3_conv[0][0]']
ization)
conv3_block2_add (Add) (None, 32, 32, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activation) (None, 32, 32, 512) 0 ['conv3_block2_add[0][0]']
conv3_block3_1_conv (Conv2D) (None, 32, 32, 128) 65664 ['conv3_block2_out[0][0]']
conv3_block3_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block3_1_conv[0][0]']
ization)
conv3_block3_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block3_1_bn[0][0]']
n)
conv3_block3_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block3_1_relu[0][0]']
conv3_block3_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block3_2_conv[0][0]']
ization)
conv3_block3_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block3_2_bn[0][0]']
n)
conv3_block3_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block3_2_relu[0][0]']
conv3_block3_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block3_3_conv[0][0]']
ization)
conv3_block3_add (Add) (None, 32, 32, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activation) (None, 32, 32, 512) 0 ['conv3_block3_add[0][0]']
conv3_block4_1_conv (Conv2D) (None, 32, 32, 128) 65664 ['conv3_block3_out[0][0]']
conv3_block4_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block4_1_conv[0][0]']
ization)
conv3_block4_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block4_1_bn[0][0]']
n)
conv3_block4_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block4_1_relu[0][0]']
conv3_block4_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block4_2_conv[0][0]']
ization)
conv3_block4_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block4_2_bn[0][0]']
n)
conv3_block4_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block4_2_relu[0][0]']
conv3_block4_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block4_3_conv[0][0]']
ization)
conv3_block4_add (Add) (None, 32, 32, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activation) (None, 32, 32, 512) 0 ['conv3_block4_add[0][0]']
conv4_block1_1_conv (Conv2D) (None, 16, 16, 256) 131328 ['conv3_block4_out[0][0]']
conv4_block1_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block1_1_conv[0][0]']
ization)
conv4_block1_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block1_1_bn[0][0]']
n)
conv4_block1_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block1_1_relu[0][0]']
conv4_block1_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block1_2_conv[0][0]']
ization)
conv4_block1_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block1_2_bn[0][0]']
n)
conv4_block1_0_conv (Conv2D) (None, 16, 16, 1024 525312 ['conv3_block4_out[0][0]']
)
conv4_block1_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block1_2_relu[0][0]']
)
conv4_block1_0_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block1_0_conv[0][0]']
ization) )
conv4_block1_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block1_3_conv[0][0]']
ization) )
conv4_block1_add (Add) (None, 16, 16, 1024 0 ['conv4_block1_0_bn[0][0]',
) 'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activation) (None, 16, 16, 1024 0 ['conv4_block1_add[0][0]']
)
conv4_block2_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block1_out[0][0]']
conv4_block2_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block2_1_conv[0][0]']
ization)
conv4_block2_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block2_1_bn[0][0]']
n)
conv4_block2_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block2_1_relu[0][0]']
conv4_block2_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block2_2_conv[0][0]']
ization)
conv4_block2_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block2_2_bn[0][0]']
n)
conv4_block2_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block2_2_relu[0][0]']
)
conv4_block2_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block2_3_conv[0][0]']
ization) )
conv4_block2_add (Add) (None, 16, 16, 1024 0 ['conv4_block1_out[0][0]',
) 'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activation) (None, 16, 16, 1024 0 ['conv4_block2_add[0][0]']
)
conv4_block3_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block2_out[0][0]']
conv4_block3_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block3_1_conv[0][0]']
ization)
conv4_block3_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block3_1_bn[0][0]']
n)
conv4_block3_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block3_1_relu[0][0]']
conv4_block3_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block3_2_conv[0][0]']
ization)
conv4_block3_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block3_2_bn[0][0]']
n)
conv4_block3_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block3_2_relu[0][0]']
)
conv4_block3_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block3_3_conv[0][0]']
ization) )
conv4_block3_add (Add) (None, 16, 16, 1024 0 ['conv4_block2_out[0][0]',
) 'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activation) (None, 16, 16, 1024 0 ['conv4_block3_add[0][0]']
)
conv4_block4_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block3_out[0][0]']
conv4_block4_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block4_1_conv[0][0]']
ization)
conv4_block4_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block4_1_bn[0][0]']
n)
conv4_block4_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block4_1_relu[0][0]']
conv4_block4_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block4_2_conv[0][0]']
ization)
conv4_block4_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block4_2_bn[0][0]']
n)
conv4_block4_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block4_2_relu[0][0]']
)
conv4_block4_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block4_3_conv[0][0]']
ization) )
conv4_block4_add (Add) (None, 16, 16, 1024 0 ['conv4_block3_out[0][0]',
) 'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activation) (None, 16, 16, 1024 0 ['conv4_block4_add[0][0]']
)
conv4_block5_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block4_out[0][0]']
conv4_block5_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block5_1_conv[0][0]']
ization)
conv4_block5_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block5_1_bn[0][0]']
n)
conv4_block5_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block5_1_relu[0][0]']
conv4_block5_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block5_2_conv[0][0]']
ization)
conv4_block5_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block5_2_bn[0][0]']
n)
conv4_block5_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block5_2_relu[0][0]']
)
conv4_block5_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block5_3_conv[0][0]']
ization) )
conv4_block5_add (Add) (None, 16, 16, 1024 0 ['conv4_block4_out[0][0]',
) 'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activation) (None, 16, 16, 1024 0 ['conv4_block5_add[0][0]']
)
conv4_block6_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block5_out[0][0]']
conv4_block6_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block6_1_conv[0][0]']
ization)
conv4_block6_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block6_1_bn[0][0]']
n)
conv4_block6_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block6_1_relu[0][0]']
conv4_block6_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block6_2_conv[0][0]']
ization)
conv4_block6_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block6_2_bn[0][0]']
n)
conv4_block6_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block6_2_relu[0][0]']
)
conv4_block6_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block6_3_conv[0][0]']
ization) )
conv4_block6_add (Add) (None, 16, 16, 1024 0 ['conv4_block5_out[0][0]',
) 'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activation) (None, 16, 16, 1024 0 ['conv4_block6_add[0][0]']
)
conv5_block1_1_conv (Conv2D) (None, 8, 8, 512) 524800 ['conv4_block6_out[0][0]']
conv5_block1_1_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block1_1_conv[0][0]']
ization)
conv5_block1_1_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block1_1_bn[0][0]']
n)
conv5_block1_2_conv (Conv2D) (None, 8, 8, 512) 2359808 ['conv5_block1_1_relu[0][0]']
conv5_block1_2_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block1_2_conv[0][0]']
ization)
conv5_block1_2_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block1_2_bn[0][0]']
n)
conv5_block1_0_conv (Conv2D) (None, 8, 8, 2048) 2099200 ['conv4_block6_out[0][0]']
conv5_block1_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
conv5_block1_0_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block1_0_conv[0][0]']
ization)
conv5_block1_3_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block1_3_conv[0][0]']
ization)
conv5_block1_add (Add) (None, 8, 8, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activation) (None, 8, 8, 2048) 0 ['conv5_block1_add[0][0]']
conv5_block2_1_conv (Conv2D) (None, 8, 8, 512) 1049088 ['conv5_block1_out[0][0]']
conv5_block2_1_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block2_1_conv[0][0]']
ization)
conv5_block2_1_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block2_1_bn[0][0]']
n)
conv5_block2_2_conv (Conv2D) (None, 8, 8, 512) 2359808 ['conv5_block2_1_relu[0][0]']
conv5_block2_2_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block2_2_conv[0][0]']
ization)
conv5_block2_2_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block2_2_bn[0][0]']
n)
conv5_block2_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
conv5_block2_3_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block2_3_conv[0][0]']
ization)
conv5_block2_add (Add) (None, 8, 8, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activation) (None, 8, 8, 2048) 0 ['conv5_block2_add[0][0]']
conv5_block3_1_conv (Conv2D) (None, 8, 8, 512) 1049088 ['conv5_block2_out[0][0]']
conv5_block3_1_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block3_1_conv[0][0]']
ization)
conv5_block3_1_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block3_1_bn[0][0]']
n)
conv5_block3_2_conv (Conv2D) (None, 8, 8, 512) 2359808 ['conv5_block3_1_relu[0][0]']
conv5_block3_2_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block3_2_conv[0][0]']
ization)
conv5_block3_2_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block3_2_bn[0][0]']
n)
conv5_block3_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
conv5_block3_3_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block3_3_conv[0][0]']
ization)
conv5_block3_add (Add) (None, 8, 8, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activation) (None, 8, 8, 2048) 0 ['conv5_block3_add[0][0]']
==================================================================================================
Total params: 23,587,712
Trainable params: 23,534,592
Non-trainable params: 53,120
__________________________________________________________________________________________________
# before this i tried with trainable layer but the accuracy was less as compared
# Freeze the pretrained backbone so only the new classification head trains.
for layer in clf_model.layers:
    # BUG FIX: the original wrote `layers.trainable = False`, which merely
    # set an attribute on the imported keras `layers` MODULE and left every
    # backbone layer trainable (note "Trainable params: 23,534,592" in the
    # summary above).  Freeze each layer object instead.
    layer.trainable = False

# Classification head on top of the frozen ResNet50 features.
head = clf_model.output
head = AveragePooling2D(pool_size=(4,4))(head)
head = Flatten(name='Flatten')(head)
head = Dense(256, activation='relu')(head)
head = Dropout(0.3)(head)
head = Dense(256, activation='relu')(head)
head = Dropout(0.3)(head)
head = Dense(2, activation='softmax')(head)  # 2 classes: tumor / no tumor

model = Model(clf_model.input, head)
model.compile(loss = 'categorical_crossentropy',
              optimizer='adam',
              metrics= ["accuracy"]
              )
model.summary()
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 256, 256, 3 0 []
)]
conv1_pad (ZeroPadding2D) (None, 262, 262, 3) 0 ['input_1[0][0]']
conv1_conv (Conv2D) (None, 128, 128, 64 9472 ['conv1_pad[0][0]']
)
conv1_bn (BatchNormalization) (None, 128, 128, 64 256 ['conv1_conv[0][0]']
)
conv1_relu (Activation) (None, 128, 128, 64 0 ['conv1_bn[0][0]']
)
pool1_pad (ZeroPadding2D) (None, 130, 130, 64 0 ['conv1_relu[0][0]']
)
pool1_pool (MaxPooling2D) (None, 64, 64, 64) 0 ['pool1_pad[0][0]']
conv2_block1_1_conv (Conv2D) (None, 64, 64, 64) 4160 ['pool1_pool[0][0]']
conv2_block1_1_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block1_1_conv[0][0]']
ization)
conv2_block1_1_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block1_1_bn[0][0]']
n)
conv2_block1_2_conv (Conv2D) (None, 64, 64, 64) 36928 ['conv2_block1_1_relu[0][0]']
conv2_block1_2_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block1_2_conv[0][0]']
ization)
conv2_block1_2_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block1_2_bn[0][0]']
n)
conv2_block1_0_conv (Conv2D) (None, 64, 64, 256) 16640 ['pool1_pool[0][0]']
conv2_block1_3_conv (Conv2D) (None, 64, 64, 256) 16640 ['conv2_block1_2_relu[0][0]']
conv2_block1_0_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block1_0_conv[0][0]']
ization)
conv2_block1_3_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block1_3_conv[0][0]']
ization)
conv2_block1_add (Add) (None, 64, 64, 256) 0 ['conv2_block1_0_bn[0][0]',
'conv2_block1_3_bn[0][0]']
conv2_block1_out (Activation) (None, 64, 64, 256) 0 ['conv2_block1_add[0][0]']
conv2_block2_1_conv (Conv2D) (None, 64, 64, 64) 16448 ['conv2_block1_out[0][0]']
conv2_block2_1_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block2_1_conv[0][0]']
ization)
conv2_block2_1_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block2_1_bn[0][0]']
n)
conv2_block2_2_conv (Conv2D) (None, 64, 64, 64) 36928 ['conv2_block2_1_relu[0][0]']
conv2_block2_2_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block2_2_conv[0][0]']
ization)
conv2_block2_2_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block2_2_bn[0][0]']
n)
conv2_block2_3_conv (Conv2D) (None, 64, 64, 256) 16640 ['conv2_block2_2_relu[0][0]']
conv2_block2_3_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block2_3_conv[0][0]']
ization)
conv2_block2_add (Add) (None, 64, 64, 256) 0 ['conv2_block1_out[0][0]',
'conv2_block2_3_bn[0][0]']
conv2_block2_out (Activation) (None, 64, 64, 256) 0 ['conv2_block2_add[0][0]']
conv2_block3_1_conv (Conv2D) (None, 64, 64, 64) 16448 ['conv2_block2_out[0][0]']
conv2_block3_1_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block3_1_conv[0][0]']
ization)
conv2_block3_1_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block3_1_bn[0][0]']
n)
conv2_block3_2_conv (Conv2D) (None, 64, 64, 64) 36928 ['conv2_block3_1_relu[0][0]']
conv2_block3_2_bn (BatchNormal (None, 64, 64, 64) 256 ['conv2_block3_2_conv[0][0]']
ization)
conv2_block3_2_relu (Activatio (None, 64, 64, 64) 0 ['conv2_block3_2_bn[0][0]']
n)
conv2_block3_3_conv (Conv2D) (None, 64, 64, 256) 16640 ['conv2_block3_2_relu[0][0]']
conv2_block3_3_bn (BatchNormal (None, 64, 64, 256) 1024 ['conv2_block3_3_conv[0][0]']
ization)
conv2_block3_add (Add) (None, 64, 64, 256) 0 ['conv2_block2_out[0][0]',
'conv2_block3_3_bn[0][0]']
conv2_block3_out (Activation) (None, 64, 64, 256) 0 ['conv2_block3_add[0][0]']
conv3_block1_1_conv (Conv2D) (None, 32, 32, 128) 32896 ['conv2_block3_out[0][0]']
conv3_block1_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block1_1_conv[0][0]']
ization)
conv3_block1_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block1_1_bn[0][0]']
n)
conv3_block1_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block1_1_relu[0][0]']
conv3_block1_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block1_2_conv[0][0]']
ization)
conv3_block1_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block1_2_bn[0][0]']
n)
conv3_block1_0_conv (Conv2D) (None, 32, 32, 512) 131584 ['conv2_block3_out[0][0]']
conv3_block1_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block1_2_relu[0][0]']
conv3_block1_0_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block1_0_conv[0][0]']
ization)
conv3_block1_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block1_3_conv[0][0]']
ization)
conv3_block1_add (Add) (None, 32, 32, 512) 0 ['conv3_block1_0_bn[0][0]',
'conv3_block1_3_bn[0][0]']
conv3_block1_out (Activation) (None, 32, 32, 512) 0 ['conv3_block1_add[0][0]']
conv3_block2_1_conv (Conv2D) (None, 32, 32, 128) 65664 ['conv3_block1_out[0][0]']
conv3_block2_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block2_1_conv[0][0]']
ization)
conv3_block2_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block2_1_bn[0][0]']
n)
conv3_block2_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block2_1_relu[0][0]']
conv3_block2_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block2_2_conv[0][0]']
ization)
conv3_block2_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block2_2_bn[0][0]']
n)
conv3_block2_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block2_2_relu[0][0]']
conv3_block2_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block2_3_conv[0][0]']
ization)
conv3_block2_add (Add) (None, 32, 32, 512) 0 ['conv3_block1_out[0][0]',
'conv3_block2_3_bn[0][0]']
conv3_block2_out (Activation) (None, 32, 32, 512) 0 ['conv3_block2_add[0][0]']
conv3_block3_1_conv (Conv2D) (None, 32, 32, 128) 65664 ['conv3_block2_out[0][0]']
conv3_block3_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block3_1_conv[0][0]']
ization)
conv3_block3_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block3_1_bn[0][0]']
n)
conv3_block3_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block3_1_relu[0][0]']
conv3_block3_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block3_2_conv[0][0]']
ization)
conv3_block3_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block3_2_bn[0][0]']
n)
conv3_block3_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block3_2_relu[0][0]']
conv3_block3_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block3_3_conv[0][0]']
ization)
conv3_block3_add (Add) (None, 32, 32, 512) 0 ['conv3_block2_out[0][0]',
'conv3_block3_3_bn[0][0]']
conv3_block3_out (Activation) (None, 32, 32, 512) 0 ['conv3_block3_add[0][0]']
conv3_block4_1_conv (Conv2D) (None, 32, 32, 128) 65664 ['conv3_block3_out[0][0]']
conv3_block4_1_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block4_1_conv[0][0]']
ization)
conv3_block4_1_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block4_1_bn[0][0]']
n)
conv3_block4_2_conv (Conv2D) (None, 32, 32, 128) 147584 ['conv3_block4_1_relu[0][0]']
conv3_block4_2_bn (BatchNormal (None, 32, 32, 128) 512 ['conv3_block4_2_conv[0][0]']
ization)
conv3_block4_2_relu (Activatio (None, 32, 32, 128) 0 ['conv3_block4_2_bn[0][0]']
n)
conv3_block4_3_conv (Conv2D) (None, 32, 32, 512) 66048 ['conv3_block4_2_relu[0][0]']
conv3_block4_3_bn (BatchNormal (None, 32, 32, 512) 2048 ['conv3_block4_3_conv[0][0]']
ization)
conv3_block4_add (Add) (None, 32, 32, 512) 0 ['conv3_block3_out[0][0]',
'conv3_block4_3_bn[0][0]']
conv3_block4_out (Activation) (None, 32, 32, 512) 0 ['conv3_block4_add[0][0]']
conv4_block1_1_conv (Conv2D) (None, 16, 16, 256) 131328 ['conv3_block4_out[0][0]']
conv4_block1_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block1_1_conv[0][0]']
ization)
conv4_block1_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block1_1_bn[0][0]']
n)
conv4_block1_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block1_1_relu[0][0]']
conv4_block1_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block1_2_conv[0][0]']
ization)
conv4_block1_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block1_2_bn[0][0]']
n)
conv4_block1_0_conv (Conv2D) (None, 16, 16, 1024 525312 ['conv3_block4_out[0][0]']
)
conv4_block1_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block1_2_relu[0][0]']
)
conv4_block1_0_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block1_0_conv[0][0]']
ization) )
conv4_block1_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block1_3_conv[0][0]']
ization) )
conv4_block1_add (Add) (None, 16, 16, 1024 0 ['conv4_block1_0_bn[0][0]',
) 'conv4_block1_3_bn[0][0]']
conv4_block1_out (Activation) (None, 16, 16, 1024 0 ['conv4_block1_add[0][0]']
)
conv4_block2_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block1_out[0][0]']
conv4_block2_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block2_1_conv[0][0]']
ization)
conv4_block2_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block2_1_bn[0][0]']
n)
conv4_block2_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block2_1_relu[0][0]']
conv4_block2_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block2_2_conv[0][0]']
ization)
conv4_block2_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block2_2_bn[0][0]']
n)
conv4_block2_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block2_2_relu[0][0]']
)
conv4_block2_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block2_3_conv[0][0]']
ization) )
conv4_block2_add (Add) (None, 16, 16, 1024 0 ['conv4_block1_out[0][0]',
) 'conv4_block2_3_bn[0][0]']
conv4_block2_out (Activation) (None, 16, 16, 1024 0 ['conv4_block2_add[0][0]']
)
conv4_block3_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block2_out[0][0]']
conv4_block3_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block3_1_conv[0][0]']
ization)
conv4_block3_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block3_1_bn[0][0]']
n)
conv4_block3_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block3_1_relu[0][0]']
conv4_block3_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block3_2_conv[0][0]']
ization)
conv4_block3_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block3_2_bn[0][0]']
n)
conv4_block3_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block3_2_relu[0][0]']
)
conv4_block3_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block3_3_conv[0][0]']
ization) )
conv4_block3_add (Add) (None, 16, 16, 1024 0 ['conv4_block2_out[0][0]',
) 'conv4_block3_3_bn[0][0]']
conv4_block3_out (Activation) (None, 16, 16, 1024 0 ['conv4_block3_add[0][0]']
)
conv4_block4_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block3_out[0][0]']
conv4_block4_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block4_1_conv[0][0]']
ization)
conv4_block4_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block4_1_bn[0][0]']
n)
conv4_block4_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block4_1_relu[0][0]']
conv4_block4_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block4_2_conv[0][0]']
ization)
conv4_block4_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block4_2_bn[0][0]']
n)
conv4_block4_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block4_2_relu[0][0]']
)
conv4_block4_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block4_3_conv[0][0]']
ization) )
conv4_block4_add (Add) (None, 16, 16, 1024 0 ['conv4_block3_out[0][0]',
) 'conv4_block4_3_bn[0][0]']
conv4_block4_out (Activation) (None, 16, 16, 1024 0 ['conv4_block4_add[0][0]']
)
conv4_block5_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block4_out[0][0]']
conv4_block5_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block5_1_conv[0][0]']
ization)
conv4_block5_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block5_1_bn[0][0]']
n)
conv4_block5_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block5_1_relu[0][0]']
conv4_block5_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block5_2_conv[0][0]']
ization)
conv4_block5_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block5_2_bn[0][0]']
n)
conv4_block5_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block5_2_relu[0][0]']
)
conv4_block5_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block5_3_conv[0][0]']
ization) )
conv4_block5_add (Add) (None, 16, 16, 1024 0 ['conv4_block4_out[0][0]',
) 'conv4_block5_3_bn[0][0]']
conv4_block5_out (Activation) (None, 16, 16, 1024 0 ['conv4_block5_add[0][0]']
)
conv4_block6_1_conv (Conv2D) (None, 16, 16, 256) 262400 ['conv4_block5_out[0][0]']
conv4_block6_1_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block6_1_conv[0][0]']
ization)
conv4_block6_1_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block6_1_bn[0][0]']
n)
conv4_block6_2_conv (Conv2D) (None, 16, 16, 256) 590080 ['conv4_block6_1_relu[0][0]']
conv4_block6_2_bn (BatchNormal (None, 16, 16, 256) 1024 ['conv4_block6_2_conv[0][0]']
ization)
conv4_block6_2_relu (Activatio (None, 16, 16, 256) 0 ['conv4_block6_2_bn[0][0]']
n)
conv4_block6_3_conv (Conv2D) (None, 16, 16, 1024 263168 ['conv4_block6_2_relu[0][0]']
)
conv4_block6_3_bn (BatchNormal (None, 16, 16, 1024 4096 ['conv4_block6_3_conv[0][0]']
ization) )
conv4_block6_add (Add) (None, 16, 16, 1024 0 ['conv4_block5_out[0][0]',
) 'conv4_block6_3_bn[0][0]']
conv4_block6_out (Activation) (None, 16, 16, 1024 0 ['conv4_block6_add[0][0]']
)
conv5_block1_1_conv (Conv2D) (None, 8, 8, 512) 524800 ['conv4_block6_out[0][0]']
conv5_block1_1_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block1_1_conv[0][0]']
ization)
conv5_block1_1_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block1_1_bn[0][0]']
n)
conv5_block1_2_conv (Conv2D) (None, 8, 8, 512) 2359808 ['conv5_block1_1_relu[0][0]']
conv5_block1_2_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block1_2_conv[0][0]']
ization)
conv5_block1_2_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block1_2_bn[0][0]']
n)
conv5_block1_0_conv (Conv2D) (None, 8, 8, 2048) 2099200 ['conv4_block6_out[0][0]']
conv5_block1_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 ['conv5_block1_2_relu[0][0]']
conv5_block1_0_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block1_0_conv[0][0]']
ization)
conv5_block1_3_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block1_3_conv[0][0]']
ization)
conv5_block1_add (Add) (None, 8, 8, 2048) 0 ['conv5_block1_0_bn[0][0]',
'conv5_block1_3_bn[0][0]']
conv5_block1_out (Activation) (None, 8, 8, 2048) 0 ['conv5_block1_add[0][0]']
conv5_block2_1_conv (Conv2D) (None, 8, 8, 512) 1049088 ['conv5_block1_out[0][0]']
conv5_block2_1_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block2_1_conv[0][0]']
ization)
conv5_block2_1_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block2_1_bn[0][0]']
n)
conv5_block2_2_conv (Conv2D) (None, 8, 8, 512) 2359808 ['conv5_block2_1_relu[0][0]']
conv5_block2_2_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block2_2_conv[0][0]']
ization)
conv5_block2_2_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block2_2_bn[0][0]']
n)
conv5_block2_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 ['conv5_block2_2_relu[0][0]']
conv5_block2_3_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block2_3_conv[0][0]']
ization)
conv5_block2_add (Add) (None, 8, 8, 2048) 0 ['conv5_block1_out[0][0]',
'conv5_block2_3_bn[0][0]']
conv5_block2_out (Activation) (None, 8, 8, 2048) 0 ['conv5_block2_add[0][0]']
conv5_block3_1_conv (Conv2D) (None, 8, 8, 512) 1049088 ['conv5_block2_out[0][0]']
conv5_block3_1_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block3_1_conv[0][0]']
ization)
conv5_block3_1_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block3_1_bn[0][0]']
n)
conv5_block3_2_conv (Conv2D) (None, 8, 8, 512) 2359808 ['conv5_block3_1_relu[0][0]']
conv5_block3_2_bn (BatchNormal (None, 8, 8, 512) 2048 ['conv5_block3_2_conv[0][0]']
ization)
conv5_block3_2_relu (Activatio (None, 8, 8, 512) 0 ['conv5_block3_2_bn[0][0]']
n)
conv5_block3_3_conv (Conv2D) (None, 8, 8, 2048) 1050624 ['conv5_block3_2_relu[0][0]']
conv5_block3_3_bn (BatchNormal (None, 8, 8, 2048) 8192 ['conv5_block3_3_conv[0][0]']
ization)
conv5_block3_add (Add) (None, 8, 8, 2048) 0 ['conv5_block2_out[0][0]',
'conv5_block3_3_bn[0][0]']
conv5_block3_out (Activation) (None, 8, 8, 2048) 0 ['conv5_block3_add[0][0]']
average_pooling2d (AveragePool (None, 2, 2, 2048) 0 ['conv5_block3_out[0][0]']
ing2D)
Flatten (Flatten) (None, 8192) 0 ['average_pooling2d[0][0]']
dense (Dense) (None, 256) 2097408 ['Flatten[0][0]']
dropout (Dropout) (None, 256) 0 ['dense[0][0]']
dense_1 (Dense) (None, 256) 65792 ['dropout[0][0]']
dropout_1 (Dropout) (None, 256) 0 ['dense_1[0][0]']
dense_2 (Dense) (None, 2) 514 ['dropout_1[0][0]']
==================================================================================================
Total params: 25,751,426
Trainable params: 25,698,306
Non-trainable params: 53,120
__________________________________________________________________________________________________
# Stop training once val_loss has not improved for 15 consecutive epochs.
earlystopping = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)

# Persist only the best-scoring weights seen so far.
checkpointer = ModelCheckpoint(filepath="clf-resnet-weights.hdf5",
                               verbose=1,
                               save_best_only=True)

# Cut the learning rate to 20% after 10 stagnant epochs (min_delta guards
# against counting negligible improvements).
reduce_lr = ReduceLROnPlateau(monitor='val_loss', mode='min', verbose=1,
                              patience=10, min_delta=0.0001, factor=0.2)

callbacks = [checkpointer, earlystopping, reduce_lr]
# Train the classifier. BUGFIX: pass the full `callbacks` list assembled
# above — the original call passed only [checkpointer, earlystopping], so
# the ReduceLROnPlateau schedule was built but never actually used.
h = model.fit(train_generator,
              steps_per_epoch=train_generator.n // train_generator.batch_size,
              epochs=50,
              validation_data=valid_generator,
              validation_steps=valid_generator.n // valid_generator.batch_size,
              callbacks=callbacks)
2022-06-10 17:37:15.093699: W tensorflow/core/platform/profile_utils/cpu_utils.cc:128] Failed to get CPU frequency: 0 Hz
Epoch 1/50
2022-06-10 17:37:17.180872: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
187/187 [==============================] - ETA: 0s - loss: 0.5999 - accuracy: 0.7171
2022-06-10 17:39:42.833433: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
Epoch 00001: val_loss improved from inf to 0.71997, saving model to clf-resnet-weights.hdf5
/Users/thomas/miniforge3/envs/M1/lib/python3.9/site-packages/keras/engine/functional.py:1410: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.
187/187 [==============================] - 153s 769ms/step - loss: 0.5999 - accuracy: 0.7171 - val_loss: 0.7200 - val_accuracy: 0.3344 Epoch 2/50 187/187 [==============================] - ETA: 0s - loss: 0.4614 - accuracy: 0.7923 Epoch 00002: val_loss did not improve from 0.71997 187/187 [==============================] - 143s 763ms/step - loss: 0.4614 - accuracy: 0.7923 - val_loss: 0.8621 - val_accuracy: 0.3313 Epoch 3/50 187/187 [==============================] - ETA: 0s - loss: 0.3997 - accuracy: 0.8114 Epoch 00003: val_loss improved from 0.71997 to 0.71181, saving model to clf-resnet-weights.hdf5 187/187 [==============================] - 141s 752ms/step - loss: 0.3997 - accuracy: 0.8114 - val_loss: 0.7118 - val_accuracy: 0.3438 Epoch 4/50 187/187 [==============================] - ETA: 0s - loss: 0.3335 - accuracy: 0.8572 Epoch 00004: val_loss did not improve from 0.71181 187/187 [==============================] - 140s 750ms/step - loss: 0.3335 - accuracy: 0.8572 - val_loss: 0.7329 - val_accuracy: 0.6656 Epoch 5/50 187/187 [==============================] - ETA: 0s - loss: 0.3034 - accuracy: 0.8722 Epoch 00005: val_loss did not improve from 0.71181 187/187 [==============================] - 144s 768ms/step - loss: 0.3034 - accuracy: 0.8722 - val_loss: 0.8896 - val_accuracy: 0.7156 Epoch 6/50 187/187 [==============================] - ETA: 0s - loss: 0.2898 - accuracy: 0.8776 Epoch 00006: val_loss improved from 0.71181 to 0.34179, saving model to clf-resnet-weights.hdf5 187/187 [==============================] - 144s 768ms/step - loss: 0.2898 - accuracy: 0.8776 - val_loss: 0.3418 - val_accuracy: 0.8625 Epoch 7/50 187/187 [==============================] - ETA: 0s - loss: 0.2539 - accuracy: 0.8977 Epoch 00007: val_loss did not improve from 0.34179 187/187 [==============================] - 144s 772ms/step - loss: 0.2539 - accuracy: 0.8977 - val_loss: 0.5956 - val_accuracy: 0.7063 Epoch 8/50 187/187 [==============================] - ETA: 0s - loss: 0.2277 - 
accuracy: 0.9100 Epoch 00008: val_loss did not improve from 0.34179 187/187 [==============================] - 141s 754ms/step - loss: 0.2277 - accuracy: 0.9100 - val_loss: 1.5456 - val_accuracy: 0.8031 Epoch 9/50 187/187 [==============================] - ETA: 0s - loss: 0.2038 - accuracy: 0.9244 Epoch 00009: val_loss improved from 0.34179 to 0.20197, saving model to clf-resnet-weights.hdf5 187/187 [==============================] - 141s 755ms/step - loss: 0.2038 - accuracy: 0.9244 - val_loss: 0.2020 - val_accuracy: 0.9344 Epoch 10/50 187/187 [==============================] - ETA: 0s - loss: 0.1884 - accuracy: 0.9304 Epoch 00010: val_loss improved from 0.20197 to 0.19056, saving model to clf-resnet-weights.hdf5 187/187 [==============================] - 142s 759ms/step - loss: 0.1884 - accuracy: 0.9304 - val_loss: 0.1906 - val_accuracy: 0.9375 Epoch 11/50 187/187 [==============================] - ETA: 0s - loss: 0.1638 - accuracy: 0.9418 Epoch 00011: val_loss improved from 0.19056 to 0.13412, saving model to clf-resnet-weights.hdf5 187/187 [==============================] - 144s 772ms/step - loss: 0.1638 - accuracy: 0.9418 - val_loss: 0.1341 - val_accuracy: 0.9531 Epoch 12/50 187/187 [==============================] - ETA: 0s - loss: 0.1362 - accuracy: 0.9515 Epoch 00012: val_loss did not improve from 0.13412 187/187 [==============================] - 143s 764ms/step - loss: 0.1362 - accuracy: 0.9515 - val_loss: 0.2214 - val_accuracy: 0.9125 Epoch 13/50 187/187 [==============================] - ETA: 0s - loss: 0.1444 - accuracy: 0.9502 Epoch 00013: val_loss did not improve from 0.13412 187/187 [==============================] - 141s 754ms/step - loss: 0.1444 - accuracy: 0.9502 - val_loss: 0.2083 - val_accuracy: 0.9375 Epoch 14/50 187/187 [==============================] - ETA: 0s - loss: 0.1080 - accuracy: 0.9612 Epoch 00014: val_loss improved from 0.13412 to 0.09761, saving model to clf-resnet-weights.hdf5 187/187 [==============================] - 141s 
756ms/step - loss: 0.1080 - accuracy: 0.9612 - val_loss: 0.0976 - val_accuracy: 0.9563 Epoch 15/50 187/187 [==============================] - ETA: 0s - loss: 0.1221 - accuracy: 0.9555 Epoch 00015: val_loss did not improve from 0.09761 187/187 [==============================] - 142s 758ms/step - loss: 0.1221 - accuracy: 0.9555 - val_loss: 0.3236 - val_accuracy: 0.9062 Epoch 16/50 187/187 [==============================] - ETA: 0s - loss: 0.1093 - accuracy: 0.9615 Epoch 00016: val_loss did not improve from 0.09761 187/187 [==============================] - 140s 750ms/step - loss: 0.1093 - accuracy: 0.9615 - val_loss: 0.1740 - val_accuracy: 0.9375 Epoch 17/50 187/187 [==============================] - ETA: 0s - loss: 0.0859 - accuracy: 0.9702 Epoch 00017: val_loss did not improve from 0.09761 187/187 [==============================] - 142s 761ms/step - loss: 0.0859 - accuracy: 0.9702 - val_loss: 0.1327 - val_accuracy: 0.9469 Epoch 18/50 187/187 [==============================] - ETA: 0s - loss: 0.0922 - accuracy: 0.9682 Epoch 00018: val_loss did not improve from 0.09761 187/187 [==============================] - 151s 809ms/step - loss: 0.0922 - accuracy: 0.9682 - val_loss: 0.1180 - val_accuracy: 0.9563 Epoch 19/50 187/187 [==============================] - ETA: 0s - loss: 0.1414 - accuracy: 0.9542 Epoch 00019: val_loss did not improve from 0.09761 187/187 [==============================] - 143s 762ms/step - loss: 0.1414 - accuracy: 0.9542 - val_loss: 0.1418 - val_accuracy: 0.9406 Epoch 20/50 187/187 [==============================] - ETA: 0s - loss: 0.1055 - accuracy: 0.9632 Epoch 00020: val_loss did not improve from 0.09761 187/187 [==============================] - 146s 783ms/step - loss: 0.1055 - accuracy: 0.9632 - val_loss: 0.1875 - val_accuracy: 0.9469 Epoch 21/50 187/187 [==============================] - ETA: 0s - loss: 0.0844 - accuracy: 0.9706 Epoch 00021: val_loss did not improve from 0.09761 187/187 [==============================] - 142s 759ms/step - loss: 
0.0844 - accuracy: 0.9706 - val_loss: 0.2847 - val_accuracy: 0.8813 Epoch 22/50 187/187 [==============================] - ETA: 0s - loss: 0.0667 - accuracy: 0.9793 Epoch 00022: val_loss did not improve from 0.09761 187/187 [==============================] - 144s 769ms/step - loss: 0.0667 - accuracy: 0.9793 - val_loss: 0.1334 - val_accuracy: 0.9563 Epoch 23/50 187/187 [==============================] - ETA: 0s - loss: 0.0731 - accuracy: 0.9739 Epoch 00023: val_loss did not improve from 0.09761 187/187 [==============================] - 145s 777ms/step - loss: 0.0731 - accuracy: 0.9739 - val_loss: 0.2524 - val_accuracy: 0.9219 Epoch 24/50 187/187 [==============================] - ETA: 0s - loss: 0.1027 - accuracy: 0.9642 Epoch 00024: val_loss did not improve from 0.09761 187/187 [==============================] - 141s 757ms/step - loss: 0.1027 - accuracy: 0.9642 - val_loss: 0.2035 - val_accuracy: 0.9344 Epoch 25/50 187/187 [==============================] - ETA: 0s - loss: 0.0647 - accuracy: 0.9783 Epoch 00025: val_loss did not improve from 0.09761 187/187 [==============================] - 140s 750ms/step - loss: 0.0647 - accuracy: 0.9783 - val_loss: 0.1097 - val_accuracy: 0.9563 Epoch 26/50 187/187 [==============================] - ETA: 0s - loss: 0.0496 - accuracy: 0.9839 Epoch 00026: val_loss did not improve from 0.09761 187/187 [==============================] - 142s 761ms/step - loss: 0.0496 - accuracy: 0.9839 - val_loss: 0.1192 - val_accuracy: 0.9594 Epoch 27/50 187/187 [==============================] - ETA: 0s - loss: 0.0388 - accuracy: 0.9873 Epoch 00027: val_loss did not improve from 0.09761 187/187 [==============================] - 142s 761ms/step - loss: 0.0388 - accuracy: 0.9873 - val_loss: 0.1467 - val_accuracy: 0.9656 Epoch 28/50 187/187 [==============================] - ETA: 0s - loss: 0.0441 - accuracy: 0.9833 Epoch 00028: val_loss did not improve from 0.09761 187/187 [==============================] - 141s 756ms/step - loss: 0.0441 - accuracy: 
0.9833 - val_loss: 0.1319 - val_accuracy: 0.9531 Epoch 29/50 187/187 [==============================] - ETA: 0s - loss: 0.0491 - accuracy: 0.9826 Epoch 00029: val_loss did not improve from 0.09761 187/187 [==============================] - 140s 751ms/step - loss: 0.0491 - accuracy: 0.9826 - val_loss: 0.4027 - val_accuracy: 0.8688 Epoch 00029: early stopping
# Persist the classifier architecture as JSON (weights are checkpointed
# separately in clf-resnet-weights.hdf5).
model_json = model.to_json()
with open("clf-resnet-model.json", "w") as f:
    f.write(model_json)

# Inspect which metrics the History object recorded during training.
h.history.keys()
dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
# Side-by-side loss and accuracy curves for the training run.
plt.figure(figsize=(12, 5))
panels = [('loss', "Classification Model LOSS", "loss"),
          ('accuracy', "Classification Model Acc", "Accuracy")]
for pos, (metric, title, ylab) in enumerate(panels, start=1):
    plt.subplot(1, 2, pos)
    plt.plot(h.history[metric])
    plt.plot(h.history['val_' + metric])
    plt.title(title)
    plt.ylabel(ylab)
    plt.xlabel("Epochs")
    plt.legend(['train', 'val'])
# Held-out evaluation; evaluate() returns [loss, accuracy] — loss discarded.
_, acc = model.evaluate(test_generator)
print(f"Test accuracy : {acc * 100} %")
37/37 [==============================] - 8s 219ms/step - loss: 0.6052 - accuracy: 0.8644 Test accuracy : 86.44067645072937 %
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

# Hard class labels from the softmax scores over the test generator.
prediction = model.predict(test_generator)
pred = np.argmax(prediction, axis=1)
# Ground-truth tumour/no-tumour labels for the test split.
original = np.asarray(test['mask']).astype('int')

accuracy = accuracy_score(original, pred)
print(accuracy)

cm = confusion_matrix(original, pred)
report = classification_report(original, pred, labels=[0, 1])
print(report)

plt.figure(figsize=(5, 5))
sns.heatmap(cm, annot=True)
2022-06-10 18:52:31.871404: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
0.864406779661017
precision recall f1-score support
0 0.90 0.88 0.89 376
1 0.80 0.83 0.82 214
accuracy 0.86 590
macro avg 0.85 0.86 0.85 590
weighted avg 0.87 0.86 0.86 590
# Keep only the scans that actually contain a tumour mask — the
# segmentation model is trained on positive cases only.
brain_df_mask = brain_df.loc[brain_df['mask'].eq(1)]
brain_df_mask.shape
(1373, 4)
# 85/15 train/holdout split, then halve the holdout into test and validation.
X_train, X_val = train_test_split(brain_df_mask, test_size=0.15)
X_test, X_val = train_test_split(X_val, test_size=0.5)
print(f"Train size is {len(X_train)}, valid size is {len(X_val)} & test size is {len(X_test)}")

# Materialise the image/mask path columns as plain lists for the generator.
train_ids = X_train.image_path.tolist()
train_mask = X_train.mask_path.tolist()
val_ids = X_val.image_path.tolist()
val_mask = X_val.mask_path.tolist()
Train size is 1167, valid size is 103 & test size is 103
class DataGenerator(tf.keras.utils.Sequence):
    """Keras Sequence streaming (image, binary mask) batches for segmentation.

    Parameters
    ----------
    ids : list of str
        Paths to the input MRI images.
    mask : list of str
        Paths to the corresponding mask images (parallel to ``ids``).
    image_dir : str
        Base directory (kept for backward compatibility; not used).
    batch_size : int
        Samples per batch; the trailing partial batch is dropped.
    img_h, img_w : int
        Target height/width each image and mask is resized to.
    shuffle : bool
        Reshuffle the sample order at the end of every epoch.
    """

    def __init__(self, ids, mask, image_dir='./', batch_size=16, img_h=256, img_w=256, shuffle=True):
        self.ids = ids
        self.mask = mask
        self.image_dir = image_dir
        self.batch_size = batch_size
        self.img_h = img_h
        self.img_w = img_w
        self.shuffle = shuffle
        # Build (and optionally shuffle) the index array once up front.
        self.on_epoch_end()

    def __len__(self):
        'Number of full batches per epoch.'
        # BUGFIX: floor the *ratio*. The original floored len(self.ids) alone
        # and relied on int() truncation to get the right answer.
        return int(np.floor(len(self.ids) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data.'
        # Slice out batch_size indices, then resolve them to path lists.
        indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size]
        list_ids = [self.ids[i] for i in indexes]
        list_mask = [self.mask[i] for i in indexes]
        return self.__data_generation(list_ids, list_mask)

    def on_epoch_end(self):
        'Rebuild (and shuffle, if requested) the sample indices each epoch.'
        self.indexes = np.arange(len(self.ids))
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __data_generation(self, list_ids, list_mask):
        'Load, resize and standardise one batch of images and binary masks.'
        # Depth 3 for RGB inputs; the mask has a single channel.
        X = np.empty((self.batch_size, self.img_h, self.img_w, 3))
        y = np.empty((self.batch_size, self.img_h, self.img_w, 1))
        for i in range(len(list_ids)):
            img = io.imread(str(list_ids[i]))
            mask = io.imread(str(list_mask[i]))
            # NOTE: cv2.resize expects dsize as (width, height); harmless here
            # because img_h == img_w by default — confirm if they ever differ.
            img = cv2.resize(img, (self.img_h, self.img_w))
            img = np.array(img, dtype=np.float64)
            mask = cv2.resize(mask, (self.img_h, self.img_w))
            mask = np.array(mask, dtype=np.float64)
            # Standardise the image; guard against zero variance (a uniform
            # image), which previously produced NaNs via division by zero.
            img -= img.mean()
            std = img.std()
            if std > 0:
                img /= std
            X[i,] = img
            # BUGFIX: binarise the raw mask pixels directly. The original
            # standardised the mask first, so its `> 0` test effectively
            # compared against the mask *mean* (and divided by zero for an
            # all-black mask); thresholding raw values is the intent.
            y[i,] = np.expand_dims(mask, axis=2)
        y = (y > 0).astype(int)
        return X, y
# Batch generators for the segmentation network (defaults: 16 x 256x256).
train_data = DataGenerator(ids=train_ids, mask=train_mask)
val_data = DataGenerator(ids=val_ids, mask=val_mask)
# lets create model now
def resblock(X, f):
    """Residual block with f filters.

    Main path: 1x1 conv -> BN -> ReLU -> 3x3 conv (same padding) -> BN.
    Shortcut:  1x1 conv -> BN applied to the block input.
    The two paths are summed and passed through a final ReLU.
    """
    # Shortcut branch projects the input to f channels.
    shortcut = Conv2D(f, kernel_size=(1, 1), kernel_initializer='he_normal')(X)
    shortcut = BatchNormalization()(shortcut)
    # Main branch.
    main = Conv2D(f, kernel_size=(1, 1), kernel_initializer='he_normal')(X)
    main = BatchNormalization()(main)
    main = Activation('relu')(main)
    main = Conv2D(f, kernel_size=(3, 3), padding='same', kernel_initializer='he_normal')(main)
    main = BatchNormalization()(main)
    # Merge and activate.
    merged = Add()([main, shortcut])
    return Activation('relu')(merged)
def upsample_concat(x, skip):
    """Double the spatial resolution of `x` and concatenate the matching
    encoder feature map `skip` along the channel axis (U-Net skip link)."""
    upsampled = UpSampling2D((2, 2))(x)
    return Concatenate()([upsampled, skip])
# U-Net-style segmentation network: a 4-stage residual encoder, a residual
# bottleneck, and a 4-stage decoder with skip connections from each encoder
# stage. Output is a single-channel sigmoid mask at full input resolution.
input_shape = (256,256,3)
X_input = Input(input_shape) # input tensor: 256x256 RGB image
# Stage 1: plain double conv (16 filters), no residual connection yet
conv_1 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(X_input)
conv_1 = BatchNormalization()(conv_1)
conv_1 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv_1)
conv_1 = BatchNormalization()(conv_1)
pool_1 = MaxPool2D((2,2))(conv_1)
# stage 2: residual block at 128x128, 32 filters
conv_2 = resblock(pool_1, 32)
pool_2 = MaxPool2D((2,2))(conv_2)
# Stage 3: residual block at 64x64, 64 filters
conv_3 = resblock(pool_2, 64)
pool_3 = MaxPool2D((2,2))(conv_3)
# Stage 4: residual block at 32x32, 128 filters
conv_4 = resblock(pool_3, 128)
pool_4 = MaxPool2D((2,2))(conv_4)
# Stage 5 (bottle neck): deepest features at 16x16, 256 filters
conv_5 = resblock(pool_4, 256)
# Upsample Stage 1: 16x16 -> 32x32, fuse with encoder stage 4
up_1 = upsample_concat(conv_5, conv_4)
up_1 = resblock(up_1, 128)
# Upsample Stage 2: 32x32 -> 64x64, fuse with encoder stage 3
up_2 = upsample_concat(up_1, conv_3)
up_2 = resblock(up_2, 64)
# Upsample Stage 3: 64x64 -> 128x128, fuse with encoder stage 2
up_3 = upsample_concat(up_2, conv_2)
up_3 = resblock(up_3, 32)
# Upsample Stage 4: 128x128 -> 256x256, fuse with encoder stage 1
up_4 = upsample_concat(up_3, conv_1)
up_4 = resblock(up_4, 16)
# final output: per-pixel tumour probability via 1x1 conv + sigmoid
out = Conv2D(1, (1,1), kernel_initializer='he_normal', padding='same', activation='sigmoid')(up_4)
seg_model = Model(X_input, out)
seg_model.summary()
Model: "model_1"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_2 (InputLayer) [(None, 256, 256, 3 0 []
)]
conv2d (Conv2D) (None, 256, 256, 16 448 ['input_2[0][0]']
)
batch_normalization (BatchNorm (None, 256, 256, 16 64 ['conv2d[0][0]']
alization) )
conv2d_1 (Conv2D) (None, 256, 256, 16 2320 ['batch_normalization[0][0]']
)
batch_normalization_1 (BatchNo (None, 256, 256, 16 64 ['conv2d_1[0][0]']
rmalization) )
max_pooling2d (MaxPooling2D) (None, 128, 128, 16 0 ['batch_normalization_1[0][0]']
)
conv2d_2 (Conv2D) (None, 128, 128, 32 544 ['max_pooling2d[0][0]']
)
batch_normalization_2 (BatchNo (None, 128, 128, 32 128 ['conv2d_2[0][0]']
rmalization) )
activation (Activation) (None, 128, 128, 32 0 ['batch_normalization_2[0][0]']
)
conv2d_3 (Conv2D) (None, 128, 128, 32 9248 ['activation[0][0]']
)
conv2d_4 (Conv2D) (None, 128, 128, 32 544 ['max_pooling2d[0][0]']
)
batch_normalization_3 (BatchNo (None, 128, 128, 32 128 ['conv2d_3[0][0]']
rmalization) )
batch_normalization_4 (BatchNo (None, 128, 128, 32 128 ['conv2d_4[0][0]']
rmalization) )
add (Add) (None, 128, 128, 32 0 ['batch_normalization_3[0][0]',
) 'batch_normalization_4[0][0]']
activation_1 (Activation) (None, 128, 128, 32 0 ['add[0][0]']
)
max_pooling2d_1 (MaxPooling2D) (None, 64, 64, 32) 0 ['activation_1[0][0]']
conv2d_5 (Conv2D) (None, 64, 64, 64) 2112 ['max_pooling2d_1[0][0]']
batch_normalization_5 (BatchNo (None, 64, 64, 64) 256 ['conv2d_5[0][0]']
rmalization)
activation_2 (Activation) (None, 64, 64, 64) 0 ['batch_normalization_5[0][0]']
conv2d_6 (Conv2D) (None, 64, 64, 64) 36928 ['activation_2[0][0]']
conv2d_7 (Conv2D) (None, 64, 64, 64) 2112 ['max_pooling2d_1[0][0]']
batch_normalization_6 (BatchNo (None, 64, 64, 64) 256 ['conv2d_6[0][0]']
rmalization)
batch_normalization_7 (BatchNo (None, 64, 64, 64) 256 ['conv2d_7[0][0]']
rmalization)
add_1 (Add) (None, 64, 64, 64) 0 ['batch_normalization_6[0][0]',
'batch_normalization_7[0][0]']
activation_3 (Activation) (None, 64, 64, 64) 0 ['add_1[0][0]']
max_pooling2d_2 (MaxPooling2D) (None, 32, 32, 64) 0 ['activation_3[0][0]']
conv2d_8 (Conv2D) (None, 32, 32, 128) 8320 ['max_pooling2d_2[0][0]']
batch_normalization_8 (BatchNo (None, 32, 32, 128) 512 ['conv2d_8[0][0]']
rmalization)
activation_4 (Activation) (None, 32, 32, 128) 0 ['batch_normalization_8[0][0]']
conv2d_9 (Conv2D) (None, 32, 32, 128) 147584 ['activation_4[0][0]']
conv2d_10 (Conv2D) (None, 32, 32, 128) 8320 ['max_pooling2d_2[0][0]']
batch_normalization_9 (BatchNo (None, 32, 32, 128) 512 ['conv2d_9[0][0]']
rmalization)
batch_normalization_10 (BatchN (None, 32, 32, 128) 512 ['conv2d_10[0][0]']
ormalization)
add_2 (Add) (None, 32, 32, 128) 0 ['batch_normalization_9[0][0]',
'batch_normalization_10[0][0]']
activation_5 (Activation) (None, 32, 32, 128) 0 ['add_2[0][0]']
max_pooling2d_3 (MaxPooling2D) (None, 16, 16, 128) 0 ['activation_5[0][0]']
conv2d_11 (Conv2D) (None, 16, 16, 256) 33024 ['max_pooling2d_3[0][0]']
batch_normalization_11 (BatchN (None, 16, 16, 256) 1024 ['conv2d_11[0][0]']
ormalization)
activation_6 (Activation) (None, 16, 16, 256) 0 ['batch_normalization_11[0][0]']
conv2d_12 (Conv2D) (None, 16, 16, 256) 590080 ['activation_6[0][0]']
conv2d_13 (Conv2D) (None, 16, 16, 256) 33024 ['max_pooling2d_3[0][0]']
batch_normalization_12 (BatchN (None, 16, 16, 256) 1024 ['conv2d_12[0][0]']
ormalization)
batch_normalization_13 (BatchN (None, 16, 16, 256) 1024 ['conv2d_13[0][0]']
ormalization)
add_3 (Add) (None, 16, 16, 256) 0 ['batch_normalization_12[0][0]',
'batch_normalization_13[0][0]']
activation_7 (Activation) (None, 16, 16, 256) 0 ['add_3[0][0]']
up_sampling2d (UpSampling2D) (None, 32, 32, 256) 0 ['activation_7[0][0]']
concatenate (Concatenate) (None, 32, 32, 384) 0 ['up_sampling2d[0][0]',
'activation_5[0][0]']
conv2d_14 (Conv2D) (None, 32, 32, 128) 49280 ['concatenate[0][0]']
batch_normalization_14 (BatchN (None, 32, 32, 128) 512 ['conv2d_14[0][0]']
ormalization)
activation_8 (Activation) (None, 32, 32, 128) 0 ['batch_normalization_14[0][0]']
conv2d_15 (Conv2D) (None, 32, 32, 128) 147584 ['activation_8[0][0]']
conv2d_16 (Conv2D) (None, 32, 32, 128) 49280 ['concatenate[0][0]']
batch_normalization_15 (BatchN (None, 32, 32, 128) 512 ['conv2d_15[0][0]']
ormalization)
batch_normalization_16 (BatchN (None, 32, 32, 128) 512 ['conv2d_16[0][0]']
ormalization)
add_4 (Add) (None, 32, 32, 128) 0 ['batch_normalization_15[0][0]',
'batch_normalization_16[0][0]']
activation_9 (Activation) (None, 32, 32, 128) 0 ['add_4[0][0]']
up_sampling2d_1 (UpSampling2D) (None, 64, 64, 128) 0 ['activation_9[0][0]']
concatenate_1 (Concatenate) (None, 64, 64, 192) 0 ['up_sampling2d_1[0][0]',
'activation_3[0][0]']
conv2d_17 (Conv2D) (None, 64, 64, 64) 12352 ['concatenate_1[0][0]']
batch_normalization_17 (BatchN (None, 64, 64, 64) 256 ['conv2d_17[0][0]']
ormalization)
activation_10 (Activation) (None, 64, 64, 64) 0 ['batch_normalization_17[0][0]']
conv2d_18 (Conv2D) (None, 64, 64, 64) 36928 ['activation_10[0][0]']
conv2d_19 (Conv2D) (None, 64, 64, 64) 12352 ['concatenate_1[0][0]']
batch_normalization_18 (BatchN (None, 64, 64, 64) 256 ['conv2d_18[0][0]']
ormalization)
batch_normalization_19 (BatchN (None, 64, 64, 64) 256 ['conv2d_19[0][0]']
ormalization)
add_5 (Add) (None, 64, 64, 64) 0 ['batch_normalization_18[0][0]',
'batch_normalization_19[0][0]']
activation_11 (Activation) (None, 64, 64, 64) 0 ['add_5[0][0]']
up_sampling2d_2 (UpSampling2D) (None, 128, 128, 64 0 ['activation_11[0][0]']
)
concatenate_2 (Concatenate) (None, 128, 128, 96 0 ['up_sampling2d_2[0][0]',
) 'activation_1[0][0]']
conv2d_20 (Conv2D) (None, 128, 128, 32 3104 ['concatenate_2[0][0]']
)
batch_normalization_20 (BatchN (None, 128, 128, 32 128 ['conv2d_20[0][0]']
ormalization) )
activation_12 (Activation) (None, 128, 128, 32 0 ['batch_normalization_20[0][0]']
)
conv2d_21 (Conv2D) (None, 128, 128, 32 9248 ['activation_12[0][0]']
)
conv2d_22 (Conv2D) (None, 128, 128, 32 3104 ['concatenate_2[0][0]']
)
batch_normalization_21 (BatchN (None, 128, 128, 32 128 ['conv2d_21[0][0]']
ormalization) )
batch_normalization_22 (BatchN (None, 128, 128, 32 128 ['conv2d_22[0][0]']
ormalization) )
add_6 (Add) (None, 128, 128, 32 0 ['batch_normalization_21[0][0]',
) 'batch_normalization_22[0][0]']
activation_13 (Activation) (None, 128, 128, 32 0 ['add_6[0][0]']
)
up_sampling2d_3 (UpSampling2D) (None, 256, 256, 32 0 ['activation_13[0][0]']
)
concatenate_3 (Concatenate) (None, 256, 256, 48 0 ['up_sampling2d_3[0][0]',
) 'batch_normalization_1[0][0]']
conv2d_23 (Conv2D) (None, 256, 256, 16 784 ['concatenate_3[0][0]']
)
batch_normalization_23 (BatchN (None, 256, 256, 16 64 ['conv2d_23[0][0]']
ormalization) )
activation_14 (Activation) (None, 256, 256, 16 0 ['batch_normalization_23[0][0]']
)
conv2d_24 (Conv2D) (None, 256, 256, 16 2320 ['activation_14[0][0]']
)
conv2d_25 (Conv2D) (None, 256, 256, 16 784 ['concatenate_3[0][0]']
)
batch_normalization_24 (BatchN (None, 256, 256, 16 64 ['conv2d_24[0][0]']
ormalization) )
batch_normalization_25 (BatchN (None, 256, 256, 16 64 ['conv2d_25[0][0]']
ormalization) )
add_7 (Add) (None, 256, 256, 16 0 ['batch_normalization_24[0][0]',
) 'batch_normalization_25[0][0]']
activation_15 (Activation) (None, 256, 256, 16 0 ['add_7[0][0]']
)
conv2d_26 (Conv2D) (None, 256, 256, 1) 17 ['activation_15[0][0]']
==================================================================================================
Total params: 1,210,513
Trainable params: 1,206,129
Non-trainable params: 4,384
__________________________________________________________________________________________________
from keras.losses import binary_crossentropy
# numerical-stability constants shared by the loss/metric functions below
epsilon = 1e-5
smooth = 1

def tversky(y_true, y_pred):
    """Tversky index between a ground-truth and a predicted mask.

    With alpha = 0.7, false negatives are penalised more heavily than
    false positives — useful for small tumour regions. `smooth` keeps
    the ratio defined when both masks are empty.
    """
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    tp = K.sum(truth * pred)            # true positives
    fn = K.sum(truth * (1 - pred))      # false negatives
    fp = K.sum((1 - truth) * pred)      # false positives
    alpha = 0.7
    return (tp + smooth) / (tp + alpha * fn + (1 - alpha) * fp + smooth)
def focal_tversky(y_true, y_pred):
    """Focal Tversky loss: (1 - Tversky index) ** gamma.

    gamma = 0.75 focuses training on hard examples (low Tversky index).
    """
    # both tensors must share a float dtype before the element-wise math
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    gamma = 0.75
    return K.pow(1 - tversky(y_true, y_pred), gamma)
def tversky_loss(y_true, y_pred):
    """Plain Tversky loss: one minus the Tversky index."""
    return 1 - tversky(y_true, y_pred)
# compiling the model and defining the training callbacks
# NOTE: `lr` is deprecated in tf.keras optimizers (it triggered the
# UserWarning seen during training) — `learning_rate` is the supported name.
adam = tf.keras.optimizers.Adam(learning_rate=0.05, epsilon=0.1)
seg_model.compile(optimizer=adam,
                  loss=focal_tversky,   # focal Tversky drives optimisation
                  metrics=[tversky]     # plain Tversky index for monitoring
                  )

# stop training once val_loss has not improved for 20 consecutive epochs
earlystopping = EarlyStopping(monitor='val_loss',
                              mode='min',
                              verbose=1,
                              patience=20
                              )
# save only the weights that achieve the lowest validation loss so far
checkpointer = ModelCheckpoint(filepath="ResUNet-segModel-weights.hdf5",
                               verbose=1,
                               save_best_only=True
                               )
# multiply the learning rate by 0.2 after 10 epochs without a
# val_loss improvement of at least min_delta
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              mode='min',
                              verbose=1,
                              patience=10,
                              min_delta=0.0001,
                              factor=0.2
                              )
/Users/thomas/miniforge3/envs/M1/lib/python3.9/site-packages/keras/optimizer_v2/adam.py:105: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
# Train the segmentation network. The callbacks checkpoint the best
# weights, shrink the LR on plateaus, and stop early on stagnation;
# `h` keeps the per-epoch metric history for plotting later.
h = seg_model.fit(
    train_data,
    epochs=60,
    validation_data=val_data,
    callbacks=[checkpointer, earlystopping, reduce_lr],
)
Epoch 1/60
2022-06-10 18:54:12.710468: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
72/72 [==============================] - ETA: 0s - loss: 0.8898 - tversky: 0.1440
2022-06-10 18:55:04.792755: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
Epoch 00001: val_loss improved from inf to 0.89048, saving model to ResUNet-segModel-weights.hdf5
/Users/thomas/miniforge3/envs/M1/lib/python3.9/site-packages/keras/engine/functional.py:1410: CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.
72/72 [==============================] - 56s 694ms/step - loss: 0.8898 - tversky: 0.1440 - val_loss: 0.8905 - val_tversky: 0.1432 - lr: 0.0500 Epoch 2/60 72/72 [==============================] - ETA: 0s - loss: 0.7518 - tversky: 0.3136 Epoch 00002: val_loss improved from 0.89048 to 0.57963, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 692ms/step - loss: 0.7518 - tversky: 0.3136 - val_loss: 0.5796 - val_tversky: 0.5158 - lr: 0.0500 Epoch 3/60 72/72 [==============================] - ETA: 0s - loss: 0.4447 - tversky: 0.6578 Epoch 00003: val_loss improved from 0.57963 to 0.54818, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 49s 683ms/step - loss: 0.4447 - tversky: 0.6578 - val_loss: 0.5482 - val_tversky: 0.5509 - lr: 0.0500 Epoch 4/60 72/72 [==============================] - ETA: 0s - loss: 0.3767 - tversky: 0.7251 Epoch 00004: val_loss did not improve from 0.54818 72/72 [==============================] - 49s 676ms/step - loss: 0.3767 - tversky: 0.7251 - val_loss: 0.6945 - val_tversky: 0.3817 - lr: 0.0500 Epoch 5/60 72/72 [==============================] - ETA: 0s - loss: 0.3095 - tversky: 0.7888 Epoch 00005: val_loss improved from 0.54818 to 0.38641, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 692ms/step - loss: 0.3095 - tversky: 0.7888 - val_loss: 0.3864 - val_tversky: 0.7155 - lr: 0.0500 Epoch 6/60 72/72 [==============================] - ETA: 0s - loss: 0.2893 - tversky: 0.8072 Epoch 00006: val_loss improved from 0.38641 to 0.33000, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 49s 679ms/step - loss: 0.2893 - tversky: 0.8072 - val_loss: 0.3300 - val_tversky: 0.7702 - lr: 0.0500 Epoch 7/60 72/72 [==============================] - ETA: 0s - loss: 0.2691 - tversky: 0.8240 Epoch 00007: val_loss did not improve from 0.33000 72/72 [==============================] - 49s 675ms/step - loss: 0.2691 - 
tversky: 0.8240 - val_loss: 0.3531 - val_tversky: 0.7478 - lr: 0.0500 Epoch 8/60 72/72 [==============================] - ETA: 0s - loss: 0.2538 - tversky: 0.8379 Epoch 00008: val_loss improved from 0.33000 to 0.28557, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 689ms/step - loss: 0.2538 - tversky: 0.8379 - val_loss: 0.2856 - val_tversky: 0.8097 - lr: 0.0500 Epoch 9/60 72/72 [==============================] - ETA: 0s - loss: 0.2415 - tversky: 0.8485 Epoch 00009: val_loss did not improve from 0.28557 72/72 [==============================] - 49s 674ms/step - loss: 0.2415 - tversky: 0.8485 - val_loss: 0.3661 - val_tversky: 0.7348 - lr: 0.0500 Epoch 10/60 72/72 [==============================] - ETA: 0s - loss: 0.2176 - tversky: 0.8681 Epoch 00010: val_loss improved from 0.28557 to 0.23862, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 693ms/step - loss: 0.2176 - tversky: 0.8681 - val_loss: 0.2386 - val_tversky: 0.8507 - lr: 0.0500 Epoch 11/60 72/72 [==============================] - ETA: 0s - loss: 0.1950 - tversky: 0.8863 Epoch 00011: val_loss improved from 0.23862 to 0.22219, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 692ms/step - loss: 0.1950 - tversky: 0.8863 - val_loss: 0.2222 - val_tversky: 0.8649 - lr: 0.0500 Epoch 12/60 72/72 [==============================] - ETA: 0s - loss: 0.1928 - tversky: 0.8879 Epoch 00012: val_loss improved from 0.22219 to 0.22012, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 49s 681ms/step - loss: 0.1928 - tversky: 0.8879 - val_loss: 0.2201 - val_tversky: 0.8664 - lr: 0.0500 Epoch 13/60 72/72 [==============================] - ETA: 0s - loss: 0.1764 - tversky: 0.9004 Epoch 00013: val_loss improved from 0.22012 to 0.21317, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 49s 681ms/step - loss: 0.1764 - tversky: 0.9004 - 
val_loss: 0.2132 - val_tversky: 0.8723 - lr: 0.0500 Epoch 14/60 72/72 [==============================] - ETA: 0s - loss: 0.1799 - tversky: 0.8977 Epoch 00014: val_loss improved from 0.21317 to 0.20903, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 687ms/step - loss: 0.1799 - tversky: 0.8977 - val_loss: 0.2090 - val_tversky: 0.8756 - lr: 0.0500 Epoch 15/60 72/72 [==============================] - ETA: 0s - loss: 0.1667 - tversky: 0.9078 Epoch 00015: val_loss improved from 0.20903 to 0.20859, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 688ms/step - loss: 0.1667 - tversky: 0.9078 - val_loss: 0.2086 - val_tversky: 0.8757 - lr: 0.0500 Epoch 16/60 72/72 [==============================] - ETA: 0s - loss: 0.1578 - tversky: 0.9141 Epoch 00016: val_loss did not improve from 0.20859 72/72 [==============================] - 50s 682ms/step - loss: 0.1578 - tversky: 0.9141 - val_loss: 0.2140 - val_tversky: 0.8715 - lr: 0.0500 Epoch 17/60 72/72 [==============================] - ETA: 0s - loss: 0.1608 - tversky: 0.9121 Epoch 00017: val_loss improved from 0.20859 to 0.18663, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 691ms/step - loss: 0.1608 - tversky: 0.9121 - val_loss: 0.1866 - val_tversky: 0.8931 - lr: 0.0500 Epoch 18/60 72/72 [==============================] - ETA: 0s - loss: 0.1511 - tversky: 0.9190 Epoch 00018: val_loss did not improve from 0.18663 72/72 [==============================] - 50s 697ms/step - loss: 0.1511 - tversky: 0.9190 - val_loss: 0.1949 - val_tversky: 0.8865 - lr: 0.0500 Epoch 19/60 72/72 [==============================] - ETA: 0s - loss: 0.1484 - tversky: 0.9210 Epoch 00019: val_loss did not improve from 0.18663 72/72 [==============================] - 50s 695ms/step - loss: 0.1484 - tversky: 0.9210 - val_loss: 0.1919 - val_tversky: 0.8888 - lr: 0.0500 Epoch 20/60 72/72 [==============================] - ETA: 0s - 
loss: 0.1515 - tversky: 0.9187 Epoch 00020: val_loss did not improve from 0.18663 72/72 [==============================] - 50s 693ms/step - loss: 0.1515 - tversky: 0.9187 - val_loss: 0.2162 - val_tversky: 0.8693 - lr: 0.0500 Epoch 21/60 72/72 [==============================] - ETA: 0s - loss: 0.1509 - tversky: 0.9191 Epoch 00021: val_loss improved from 0.18663 to 0.18592, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 49s 686ms/step - loss: 0.1509 - tversky: 0.9191 - val_loss: 0.1859 - val_tversky: 0.8933 - lr: 0.0500 Epoch 22/60 72/72 [==============================] - ETA: 0s - loss: 0.1395 - tversky: 0.9272 Epoch 00022: val_loss improved from 0.18592 to 0.18497, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 51s 703ms/step - loss: 0.1395 - tversky: 0.9272 - val_loss: 0.1850 - val_tversky: 0.8943 - lr: 0.0500 Epoch 23/60 72/72 [==============================] - ETA: 0s - loss: 0.1325 - tversky: 0.9321 Epoch 00023: val_loss improved from 0.18497 to 0.17480, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 51s 702ms/step - loss: 0.1325 - tversky: 0.9321 - val_loss: 0.1748 - val_tversky: 0.9017 - lr: 0.0500 Epoch 24/60 72/72 [==============================] - ETA: 0s - loss: 0.1333 - tversky: 0.9314 Epoch 00024: val_loss did not improve from 0.17480 72/72 [==============================] - 48s 669ms/step - loss: 0.1333 - tversky: 0.9314 - val_loss: 0.1852 - val_tversky: 0.8943 - lr: 0.0500 Epoch 25/60 72/72 [==============================] - ETA: 0s - loss: 0.1280 - tversky: 0.9351 Epoch 00025: val_loss did not improve from 0.17480 72/72 [==============================] - 49s 679ms/step - loss: 0.1280 - tversky: 0.9351 - val_loss: 0.2069 - val_tversky: 0.8769 - lr: 0.0500 Epoch 26/60 72/72 [==============================] - ETA: 0s - loss: 0.1317 - tversky: 0.9326 Epoch 00026: val_loss did not improve from 0.17480 72/72 
[==============================] - 49s 674ms/step - loss: 0.1317 - tversky: 0.9326 - val_loss: 0.1770 - val_tversky: 0.9002 - lr: 0.0500 Epoch 27/60 72/72 [==============================] - ETA: 0s - loss: 0.1300 - tversky: 0.9337 Epoch 00027: val_loss did not improve from 0.17480 72/72 [==============================] - 49s 679ms/step - loss: 0.1300 - tversky: 0.9337 - val_loss: 0.2671 - val_tversky: 0.8265 - lr: 0.0500 Epoch 28/60 72/72 [==============================] - ETA: 0s - loss: 0.1203 - tversky: 0.9403 Epoch 00028: val_loss did not improve from 0.17480 72/72 [==============================] - 49s 683ms/step - loss: 0.1203 - tversky: 0.9403 - val_loss: 0.1826 - val_tversky: 0.8964 - lr: 0.0500 Epoch 29/60 72/72 [==============================] - ETA: 0s - loss: 0.1161 - tversky: 0.9431 Epoch 00029: val_loss did not improve from 0.17480 72/72 [==============================] - 50s 698ms/step - loss: 0.1161 - tversky: 0.9431 - val_loss: 0.2097 - val_tversky: 0.8752 - lr: 0.0500 Epoch 30/60 72/72 [==============================] - ETA: 0s - loss: 0.1156 - tversky: 0.9435 Epoch 00030: val_loss improved from 0.17480 to 0.17341, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 49s 685ms/step - loss: 0.1156 - tversky: 0.9435 - val_loss: 0.1734 - val_tversky: 0.9032 - lr: 0.0500 Epoch 31/60 72/72 [==============================] - ETA: 0s - loss: 0.1138 - tversky: 0.9445 Epoch 00031: val_loss did not improve from 0.17341 72/72 [==============================] - 49s 685ms/step - loss: 0.1138 - tversky: 0.9445 - val_loss: 0.1908 - val_tversky: 0.8897 - lr: 0.0500 Epoch 32/60 72/72 [==============================] - ETA: 0s - loss: 0.1107 - tversky: 0.9466 Epoch 00032: val_loss did not improve from 0.17341 72/72 [==============================] - 51s 712ms/step - loss: 0.1107 - tversky: 0.9466 - val_loss: 0.1978 - val_tversky: 0.8821 - lr: 0.0500 Epoch 33/60 72/72 [==============================] - ETA: 0s - loss: 0.1108 - 
tversky: 0.9465 Epoch 00033: val_loss did not improve from 0.17341 72/72 [==============================] - 50s 690ms/step - loss: 0.1108 - tversky: 0.9465 - val_loss: 0.1808 - val_tversky: 0.8968 - lr: 0.0500 Epoch 34/60 72/72 [==============================] - ETA: 0s - loss: 0.1055 - tversky: 0.9500 Epoch 00034: val_loss did not improve from 0.17341 72/72 [==============================] - 50s 690ms/step - loss: 0.1055 - tversky: 0.9500 - val_loss: 0.1745 - val_tversky: 0.9020 - lr: 0.0500 Epoch 35/60 72/72 [==============================] - ETA: 0s - loss: 0.1035 - tversky: 0.9512 Epoch 00035: val_loss improved from 0.17341 to 0.17168, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 51s 712ms/step - loss: 0.1035 - tversky: 0.9512 - val_loss: 0.1717 - val_tversky: 0.9039 - lr: 0.0500 Epoch 36/60 72/72 [==============================] - ETA: 0s - loss: 0.1027 - tversky: 0.9517 Epoch 00036: val_loss did not improve from 0.17168 72/72 [==============================] - 50s 696ms/step - loss: 0.1027 - tversky: 0.9517 - val_loss: 0.1786 - val_tversky: 0.8991 - lr: 0.0500 Epoch 37/60 72/72 [==============================] - ETA: 0s - loss: 0.0988 - tversky: 0.9542 Epoch 00037: val_loss did not improve from 0.17168 72/72 [==============================] - 51s 705ms/step - loss: 0.0988 - tversky: 0.9542 - val_loss: 0.1880 - val_tversky: 0.8908 - lr: 0.0500 Epoch 38/60 72/72 [==============================] - ETA: 0s - loss: 0.1015 - tversky: 0.9523 Epoch 00038: val_loss improved from 0.17168 to 0.16694, saving model to ResUNet-segModel-weights.hdf5 72/72 [==============================] - 50s 701ms/step - loss: 0.1015 - tversky: 0.9523 - val_loss: 0.1669 - val_tversky: 0.9073 - lr: 0.0500 Epoch 39/60 72/72 [==============================] - ETA: 0s - loss: 0.0999 - tversky: 0.9535 Epoch 00039: val_loss did not improve from 0.16694 72/72 [==============================] - 51s 709ms/step - loss: 0.0999 - tversky: 0.9535 - val_loss: 
0.1940 - val_tversky: 0.8873 - lr: 0.0500 Epoch 40/60 72/72 [==============================] - ETA: 0s - loss: 0.0993 - tversky: 0.9539 Epoch 00040: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 673ms/step - loss: 0.0993 - tversky: 0.9539 - val_loss: 0.1928 - val_tversky: 0.8874 - lr: 0.0500 Epoch 41/60 72/72 [==============================] - ETA: 0s - loss: 0.0953 - tversky: 0.9564 Epoch 00041: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 678ms/step - loss: 0.0953 - tversky: 0.9564 - val_loss: 0.1736 - val_tversky: 0.9027 - lr: 0.0500 Epoch 42/60 72/72 [==============================] - ETA: 0s - loss: 0.0926 - tversky: 0.9580 Epoch 00042: val_loss did not improve from 0.16694 72/72 [==============================] - 48s 673ms/step - loss: 0.0926 - tversky: 0.9580 - val_loss: 0.1867 - val_tversky: 0.8916 - lr: 0.0500 Epoch 43/60 72/72 [==============================] - ETA: 0s - loss: 0.0902 - tversky: 0.9594 Epoch 00043: val_loss did not improve from 0.16694 72/72 [==============================] - 48s 659ms/step - loss: 0.0902 - tversky: 0.9594 - val_loss: 0.1916 - val_tversky: 0.8881 - lr: 0.0500 Epoch 44/60 72/72 [==============================] - ETA: 0s - loss: 0.0903 - tversky: 0.9593 Epoch 00044: val_loss did not improve from 0.16694 72/72 [==============================] - 48s 664ms/step - loss: 0.0903 - tversky: 0.9593 - val_loss: 0.1791 - val_tversky: 0.8985 - lr: 0.0500 Epoch 45/60 72/72 [==============================] - ETA: 0s - loss: 0.0889 - tversky: 0.9602 Epoch 00045: val_loss did not improve from 0.16694 72/72 [==============================] - 48s 670ms/step - loss: 0.0889 - tversky: 0.9602 - val_loss: 0.1904 - val_tversky: 0.8898 - lr: 0.0500 Epoch 46/60 72/72 [==============================] - ETA: 0s - loss: 0.0905 - tversky: 0.9591 Epoch 00046: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 680ms/step - loss: 0.0905 - tversky: 
0.9591 - val_loss: 0.2003 - val_tversky: 0.8820 - lr: 0.0500 Epoch 47/60 72/72 [==============================] - ETA: 0s - loss: 0.0900 - tversky: 0.9595 Epoch 00047: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 676ms/step - loss: 0.0900 - tversky: 0.9595 - val_loss: 0.1855 - val_tversky: 0.8933 - lr: 0.0500 Epoch 48/60 72/72 [==============================] - ETA: 0s - loss: 0.0877 - tversky: 0.9609 Epoch 00048: val_loss did not improve from 0.16694 Epoch 00048: ReduceLROnPlateau reducing learning rate to 0.010000000149011612. 72/72 [==============================] - 49s 675ms/step - loss: 0.0877 - tversky: 0.9609 - val_loss: 0.1784 - val_tversky: 0.8993 - lr: 0.0500 Epoch 49/60 72/72 [==============================] - ETA: 0s - loss: 0.0829 - tversky: 0.9637 Epoch 00049: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 674ms/step - loss: 0.0829 - tversky: 0.9637 - val_loss: 0.1880 - val_tversky: 0.8911 - lr: 0.0100 Epoch 50/60 72/72 [==============================] - ETA: 0s - loss: 0.0811 - tversky: 0.9647 Epoch 00050: val_loss did not improve from 0.16694 72/72 [==============================] - 48s 668ms/step - loss: 0.0811 - tversky: 0.9647 - val_loss: 0.1788 - val_tversky: 0.8986 - lr: 0.0100 Epoch 51/60 72/72 [==============================] - ETA: 0s - loss: 0.0789 - tversky: 0.9661 Epoch 00051: val_loss did not improve from 0.16694 72/72 [==============================] - 50s 692ms/step - loss: 0.0789 - tversky: 0.9661 - val_loss: 0.1823 - val_tversky: 0.8961 - lr: 0.0100 Epoch 52/60 72/72 [==============================] - ETA: 0s - loss: 0.0786 - tversky: 0.9662 Epoch 00052: val_loss did not improve from 0.16694 72/72 [==============================] - 51s 713ms/step - loss: 0.0786 - tversky: 0.9662 - val_loss: 0.1832 - val_tversky: 0.8950 - lr: 0.0100 Epoch 53/60 72/72 [==============================] - ETA: 0s - loss: 0.0782 - tversky: 0.9664 Epoch 00053: val_loss did not improve 
from 0.16694 72/72 [==============================] - 49s 682ms/step - loss: 0.0782 - tversky: 0.9664 - val_loss: 0.1891 - val_tversky: 0.8903 - lr: 0.0100 Epoch 54/60 72/72 [==============================] - ETA: 0s - loss: 0.0772 - tversky: 0.9670 Epoch 00054: val_loss did not improve from 0.16694 72/72 [==============================] - 51s 705ms/step - loss: 0.0772 - tversky: 0.9670 - val_loss: 0.1834 - val_tversky: 0.8957 - lr: 0.0100 Epoch 55/60 72/72 [==============================] - ETA: 0s - loss: 0.0771 - tversky: 0.9670 Epoch 00055: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 686ms/step - loss: 0.0771 - tversky: 0.9670 - val_loss: 0.1821 - val_tversky: 0.8964 - lr: 0.0100 Epoch 56/60 72/72 [==============================] - ETA: 0s - loss: 0.0759 - tversky: 0.9678 Epoch 00056: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 683ms/step - loss: 0.0759 - tversky: 0.9678 - val_loss: 0.1770 - val_tversky: 0.8999 - lr: 0.0100 Epoch 57/60 72/72 [==============================] - ETA: 0s - loss: 0.0760 - tversky: 0.9677 Epoch 00057: val_loss did not improve from 0.16694 72/72 [==============================] - 49s 686ms/step - loss: 0.0760 - tversky: 0.9677 - val_loss: 0.1794 - val_tversky: 0.8983 - lr: 0.0100 Epoch 58/60 72/72 [==============================] - ETA: 0s - loss: 0.0753 - tversky: 0.9681 Epoch 00058: val_loss did not improve from 0.16694 Epoch 00058: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165. 72/72 [==============================] - 51s 709ms/step - loss: 0.0753 - tversky: 0.9681 - val_loss: 0.1791 - val_tversky: 0.8983 - lr: 0.0100 Epoch 00058: early stopping
# Persist the model architecture as JSON; the best weights were already
# written to ResUNet-segModel-weights.hdf5 by the ModelCheckpoint callback.
seg_model_json = seg_model.to_json()
with open("ResUNet-seg-model.json", "w") as fh:
    fh.write(seg_model_json)

# inspect which per-epoch metrics the History object recorded
h.history.keys()
dict_keys(['loss', 'tversky', 'val_loss', 'val_tversky', 'lr'])
# Side-by-side training curves: focal-Tversky loss and Tversky score.
plt.figure(figsize=(12,5))
curves = [
    ('loss', "SEG Model focal tversky Loss", "focal tversky loss"),
    ('tversky', "SEG Model tversky score", "tversky Accuracy"),
]
for panel, (metric, title, ylabel) in enumerate(curves, start=1):
    plt.subplot(1, 2, panel)
    plt.plot(h.history[metric])
    plt.plot(h.history['val_' + metric])
    plt.title(title)
    plt.ylabel(ylabel)
    plt.xlabel("Epochs")
    plt.legend(['train', 'val'])
# collect the held-out test split's image and mask paths and wrap them
# in the project's DataGenerator so they can be fed to model.evaluate
test_ids = list(X_test.image_path)
test_mask = list(X_test.mask_path)
test_data = DataGenerator(test_ids, test_mask)
# evaluate returns (focal-tversky loss, tversky metric); keep the metric
_, tv = seg_model.evaluate(test_data)
print("Segmentation tversky is {:.2f}%".format(tv*100))
6/6 [==============================] - 2s 203ms/step - loss: 0.1797 - tversky: 0.8982 Segmentation tversky is 89.82%
def prediction(test, model, model_seg):
    '''
    Run the two-stage tumour detection pipeline on every image in `test`.

    Each image first goes through the classification network `model`; if it
    predicts class 0 ("no tumour") the image is recorded with no mask.
    Otherwise the image is standardised and passed to the segmentation
    network `model_seg`; if the rounded predicted mask is all zeros the
    image is still recorded as tumour-free, otherwise the raw mask
    probabilities are stored.

    Parameters
    ----------
    test : DataFrame with an `image_path` column of MRI slice paths.
    model : classifier whose argmax output is 0 for "no tumour".
    model_seg : segmentation network taking (1, 256, 256, 3) input.

    Returns
    -------
    DataFrame with columns `image_path`, `predicted_mask` (raw network
    output of shape (1, 256, 256, 1), or the string 'No mask :)') and
    `has_mask` (0/1).
    '''
    # result accumulators, one entry per input image
    mask, image_id, has_mask = [], [], []
    # iterating through each image in the test data
    for path in test.image_path:
        # ---- stage 1: classification ----
        img = io.imread(path)
        # scale pixel values to [0, 1] for the classifier
        img = img * 1./255.
        img = cv2.resize(img, (256, 256))
        img = np.array(img, dtype=np.float64)
        # add a batch dimension: (256, 256, 3) -> (1, 256, 256, 3)
        img = np.reshape(img, (1, 256, 256, 3))
        is_defect = model.predict(img)
        # class 0 means "no tumour": record the image and skip segmentation
        if np.argmax(is_defect) == 0:
            image_id.append(path)
            has_mask.append(0)
            mask.append('No mask :)')
            continue
        # ---- stage 2: segmentation ----
        # batch array of shape (1, 256, 256, 3) for the segmentation net
        # (the original comment claimed (1, 256, 256, 1), which was wrong)
        X = np.empty((1, 256, 256, 3))
        # re-read the image: the segmenter expects standardised, not
        # [0, 1]-scaled, input
        img = io.imread(path)
        img = cv2.resize(img, (256, 256))
        img = np.array(img, dtype=np.float64)
        # standardise to zero mean / unit variance; guard against a
        # constant image whose std would be zero (division by zero)
        img -= img.mean()
        std = img.std()
        if std > 0:
            img /= std
        X[0,] = img
        predict = model_seg.predict(X)
        # an all-zero rounded mask means no tumour was actually segmented
        if predict.round().astype(int).sum() == 0:
            image_id.append(path)
            has_mask.append(0)
            mask.append('No mask :)')
        else:
            # non-zero pixels present: store the raw predicted mask
            image_id.append(path)
            has_mask.append(1)
            mask.append(predict)
    return pd.DataFrame({'image_path': image_id,
                         'predicted_mask': mask,
                         'has_mask': has_mask})
# making predictions with the two-stage pipeline (classifier -> segmenter)
df_pred = prediction(test, model, seg_model)
df_pred  # display the resulting DataFrame (notebook cell output)
2022-06-10 20:35:19.045018: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled. 2022-06-10 20:35:21.109970: I tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc:112] Plugin optimizer for device_type GPU is enabled.
| image_path | predicted_mask | has_mask | |
|---|---|---|---|
| 0 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 1 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 2 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 3 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | [[[[6.004276e-06], [2.7318297e-06], [3.3004778... | 1 |
| 4 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| ... | ... | ... | ... |
| 585 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 586 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 587 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 588 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
| 589 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | No mask :) | 0 |
590 rows × 3 columns
# merging the original and prediction dataframes on image_path so the
# ground-truth labels/masks and the predicted masks sit side by side
df_pred = test.merge(df_pred, on='image_path')
df_pred.head(10)
| image_path | mask_path | mask | predicted_mask | has_mask | |
|---|---|---|---|---|---|
| 0 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 | No mask :) | 0 |
| 1 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 | No mask :) | 0 |
| 2 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 | No mask :) | 0 |
| 3 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 1 | [[[[6.004276e-06], [2.7318297e-06], [3.3004778... | 1 |
| 4 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 | No mask :) | 0 |
| 5 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 1 | [[[[8.094946e-06], [1.7476865e-06], [1.3222636... | 1 |
| 6 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 1 | [[[[6.8694176e-06], [1.1246308e-06], [2.611810... | 1 |
| 7 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 | No mask :) | 0 |
| 8 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 1 | No mask :) | 0 |
| 9 | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | brain_dataset/lgg-mri-segmentation/kaggle_3m/T... | 0 | No mask :) | 0 |
# visualizing up to 15 predicted-positive slices: MRI, ground-truth mask,
# predicted mask, and each mask overlaid on the MRI
count = 0
fig, axs = plt.subplots(15,5, figsize=(30,70))
for i in range(len(df_pred)):
    if df_pred.has_mask[i]==1 and count<15:
        # read the MRI image
        # NOTE(review): skimage.io.imread returns RGB, so this BGR2RGB
        # conversion actually swaps the R and B channels — confirm intent
        img = io.imread(df_pred.image_path[i])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        axs[count][0].imshow(img)
        axs[count][0].title.set_text('Brain MRI')
        # read the ground-truth mask
        mask = io.imread(df_pred.mask_path[i])
        axs[count][1].imshow(mask)
        axs[count][1].title.set_text('Original Mask')
        # round the raw network probabilities to a binary predicted mask
        pred = np.array(df_pred.predicted_mask[i]).squeeze().round()
        axs[count][2].imshow(pred)
        axs[count][2].title.set_text('AI predicted mask')
        # paint ground-truth tumour pixels red on the MRI (mutates img)
        img[mask==255] = (255,0,0)
        axs[count][3].imshow(img)
        axs[count][3].title.set_text('Brain MRI with original mask (Ground Truth)')
        # overlay the predicted mask (green) on a fresh copy of the MRI
        img_ = io.imread(df_pred.image_path[i])
        img_ = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
        img_[pred==1] = (0,255,150)
        axs[count][4].imshow(img_)
        axs[count][4].title.set_text('MRI with AI PREDICTED MASK')
        count +=1
    # stop once 15 rows of the figure are filled
    if (count==15):
        break
fig.tight_layout()
The masks predicted by the segmentation model closely match the ground-truth masks, consistent with the ~90% Tversky score measured on the test set.